Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Analysis/GlobalsModRef/memset-escape.ll  13
-rw-r--r--  llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir  35
-rw-r--r--  llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll  14
-rw-r--r--  llvm/test/CodeGen/AArch64/abds-neg.ll  15
-rw-r--r--  llvm/test/CodeGen/AArch64/abds.ll  36
-rw-r--r--  llvm/test/CodeGen/AArch64/abdu-neg.ll  15
-rw-r--r--  llvm/test/CodeGen/AArch64/abdu.ll  38
-rw-r--r--  llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll  8
-rw-r--r--  llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-ext.ll  10
-rw-r--r--  llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll  112
-rw-r--r--  llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll  12
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir  8
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-sve.mir  390
-rw-r--r--  llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll  6
-rw-r--r--  llvm/test/CodeGen/AArch64/luti-with-sme2.ll  18
-rw-r--r--  llvm/test/CodeGen/AArch64/midpoint-int.ll  50
-rw-r--r--  llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll  42
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll  38
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll  8
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll  8
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir  4
-rw-r--r--  llvm/test/CodeGen/AArch64/split-vector-insert.ll  8
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-hazard.ll  398
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-probing-sve.ll  152
-rw-r--r--  llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll  6
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-alloca.ll  16
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll  132
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll  42
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll  18
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll  4
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll  4
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll  4
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-insert-element.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-insert-vector.ll  36
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-ldnf1.mir  92
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-ldstnt1.mir  64
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-llrint.ll  82
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-lrint.ll  82
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-pred-arith.ll  4
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll  14
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll  10
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll  26
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-trunc.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-vector-compress.ll  2
-rw-r--r--  llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll  16
-rw-r--r--  llvm/test/CodeGen/AArch64/unwind-preserved.ll  72
-rw-r--r--  llvm/test/CodeGen/AArch64/xray-custom-log.ll  14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll  60
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll  120
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir  20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll  134
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll  1486
-rw-r--r--  llvm/test/CodeGen/AMDGPU/empty-text.ll  9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll  1654
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-scratch.ll  31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir  3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir  3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll  151
-rw-r--r--  llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll  3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll  26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/literal64.ll  20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll  166
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll  308
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll  158
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll  27
-rw-r--r--  llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll  108
-rw-r--r--  llvm/test/CodeGen/AMDGPU/readcyclecounter.ll  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll  43
-rw-r--r--  llvm/test/CodeGen/ARM/bad-constraint.ll  6
-rw-r--r--  llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll  14
-rw-r--r--  llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll  33
-rw-r--r--  llvm/test/CodeGen/Generic/allow-check.ll  1
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/fpowi.ll  48
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll  12
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll  8
-rw-r--r--  llvm/test/CodeGen/NVPTX/sext-setcc.ll  13
-rw-r--r--  llvm/test/CodeGen/NVPTX/trunc-setcc.ll  269
-rw-r--r--  llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll  24
-rw-r--r--  llvm/test/CodeGen/PowerPC/aix-nest-param.ll  11
-rw-r--r--  llvm/test/CodeGen/PowerPC/aix-trampoline.ll  22
-rw-r--r--  llvm/test/CodeGen/PowerPC/check-zero-vector.ll  303
-rw-r--r--  llvm/test/CodeGen/PowerPC/mtvsrbmi.ll  87
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll  10
-rw-r--r--  llvm/test/CodeGen/RISCV/macro-fusions.mir  1376
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-load-clustering.ll  47
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-mem-clustering.mir  6
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-store-clustering.ll  83
-rw-r--r--  llvm/test/CodeGen/RISCV/rv32zbkb.ll  139
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64zbkb.ll  214
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll  586
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vploadff.ll  1008
-rw-r--r--  llvm/test/CodeGen/RISCV/unaligned-load-store.ll  20
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll  75
-rw-r--r--  llvm/test/CodeGen/X86/trunc-nsw-nuw.ll  7
-rw-r--r--  llvm/test/CodeGen/X86/xray-custom-log.ll  14
-rw-r--r--  llvm/test/CodeGen/Xtensa/atomic-load-store.ll  498
-rw-r--r--  llvm/test/CodeGen/Xtensa/atomic-rmw.ll  10298
-rw-r--r--  llvm/test/CodeGen/Xtensa/forced-atomics.ll  1426
-rw-r--r--  llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll  34
-rw-r--r--  llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs-indirect.ll  78
-rw-r--r--  llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs.ll  8
-rw-r--r--  llvm/test/Instrumentation/ThreadSanitizer/capture-no-omit.ll  92
-rw-r--r--  llvm/test/Instrumentation/ThreadSanitizer/capture.ll  2
-rw-r--r--  llvm/test/MC/AArch64/armv9.6a-lsui.s  72
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_ds.s  1911
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_features.s  32
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_operands.s  54
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s  4
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s  14
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s  2304
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s  165
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s  13
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s  3413
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s  14
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s  26
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s  175
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32.s  170
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32_err.s  30
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_err.s  30
-rw-r--r--  llvm/test/MC/Disassembler/AArch64/armv9.6a-lsui.txt  120
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt  1104
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt  34
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt  3
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt  2133
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt  3413
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt  10
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt  19
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_wmma_w32.txt  90
-rw-r--r--  llvm/test/TableGen/intrinsic-attrs.td  15
-rw-r--r--  llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll  2643
-rw-r--r--  llvm/test/Transforms/AtomicExpand/Xtensa/lit.local.cfg  2
-rw-r--r--  llvm/test/Transforms/InstCombine/load-store-forward.ll  40
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll  12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll  83
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll  125
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll  12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll  30
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll  24
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll  45
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll  36
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll  50
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll  16
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll  43
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll  14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll  32
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll  22
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll  56
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll  56
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll  10
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll  14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/cost-model.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/optsize.ll  12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll  125
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/pr81872.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/assume.ll  130
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dead_instructions.ll  6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll  224
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll  189
-rw-r--r--  llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll  42
-rw-r--r--  llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/loop-form.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/optsize.ll  7
-rw-r--r--  llvm/test/Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll  66
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pointer-induction.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll  12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scalable-assume.ll  160
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scalable-predication.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll  16
-rw-r--r--  llvm/test/Transforms/LoopVectorize/select-reduction.ll  8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll  24
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll  18
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll  4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/uniform-blend.ll  2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll  12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vplan-printing-outer-loop.ll  4
-rw-r--r--  llvm/test/Transforms/MemCpyOpt/capturing-func.ll  22
-rw-r--r--  llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll  46
-rw-r--r--  llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll  20
-rw-r--r--  llvm/test/Transforms/PGOProfile/profcheck-select.ll  63
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll  741
-rw-r--r--  llvm/test/tools/llvm-objdump/MachO/bad-trie.test  6
233 files changed, 41404 insertions, 3078 deletions
diff --git a/llvm/test/Analysis/GlobalsModRef/memset-escape.ll b/llvm/test/Analysis/GlobalsModRef/memset-escape.ll
index 77652a6..0bdc415 100644
--- a/llvm/test/Analysis/GlobalsModRef/memset-escape.ll
+++ b/llvm/test/Analysis/GlobalsModRef/memset-escape.ll
@@ -7,23 +7,14 @@ target triple = "x86_64-apple-macosx10.10.0"
@a = internal global [3 x i32] zeroinitializer, align 4
@b = common global i32 0, align 4
-; The important thing we're checking for here is the reload of (some element of)
-; @a after the memset.
+; The important thing we're checking here is that the value from the memset
+; rather than the preceding store is forwarded.
define i32 @main() {
; CHECK-LABEL: define noundef i32 @main(
; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: store i32 1, ptr getelementptr inbounds nuw (i8, ptr @a, i64 8), align 4
-; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(12) @a, i8 0, i64 12, i1 false)
; CHECK-NEXT: store i32 3, ptr @b, align 4
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @a, i64 8), align 4
-; CHECK-NEXT: [[CMP1_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[CMP1_NOT]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
-; CHECK: [[IF_THEN]]:
-; CHECK-NEXT: tail call void @abort()
-; CHECK-NEXT: unreachable
-; CHECK: [[IF_END]]:
; CHECK-NEXT: ret i32 0
;
entry:
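
A minimal standalone sketch of the forwarding the updated comment describes (illustrative only, not part of this commit; @g and the function name are made up): the load reads from the range the memset cleared, so store-to-load forwarding must take the 0 written by the memset, not the 1 from the earlier store, and the whole function folds to ret i32 0.

declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)

@g = internal global [3 x i32] zeroinitializer, align 4

define i32 @forward_from_memset() {
  store i32 1, ptr getelementptr inbounds (i8, ptr @g, i64 8), align 4
  call void @llvm.memset.p0.i64(ptr @g, i8 0, i64 12, i1 false)
  ; the memset, not the store above, is the last write to @g+8
  %v = load i32, ptr getelementptr inbounds (i8, ptr @g, i64 8), align 4
  ret i32 %v                                     ; simplifies to ret i32 0
}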
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir
new file mode 100644
index 0000000..612f7b7
--- /dev/null
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir
@@ -0,0 +1,35 @@
+# NOTE: This file is a Generic MIR translation of the llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll test file
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=print-machine-uniformity -filetype=null %s 2>&1 | FileCheck %s --check-prefix=UNI
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -run-pass=print-machine-uniformity -filetype=null %s 2>&1 | FileCheck %s --check-prefix=DIV
+
+# UNI: ALL VALUES UNIFORM
+# DIV: DIVERGENT: %3: %3:_(p0) = G_ADDRSPACE_CAST %2:_(p5)
+# DIV: DIVERGENT: %4: %4:_(p0) = G_INTRINSIC intrinsic(@llvm.amdgcn.addrspacecast.nonnull), %2:_(p5)
+
+--- |
+ define void @foo() {
+ %alloca = alloca i32, align 4, addrspace(5)
+ %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+ store i32 1, ptr %cast, align 4
+ %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+ store i32 2, ptr %cast.1, align 4
+ ret void
+ }
+...
+---
+name: foo
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body: |
+ bb.1 (%ir-block.0):
+ %10:_(s32) = G_CONSTANT i32 1
+ %12:_(s32) = G_CONSTANT i32 2
+ %8:_(p5) = G_FRAME_INDEX %stack.0.alloca
+ %9:_(p0) = G_ADDRSPACE_CAST %8(p5)
+ G_STORE %10(s32), %9(p0) :: (store (s32) into %ir.cast)
+ %11:_(p0) = G_INTRINSIC intrinsic(@llvm.amdgcn.addrspacecast.nonnull), %8(p5)
+ G_STORE %12(s32), %11(p0) :: (store (s32) into %ir.cast.1)
+ SI_RETURN
+...
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll
new file mode 100644
index 0000000..e680844
--- /dev/null
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll
@@ -0,0 +1,14 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes='print<uniformity>' -disable-output %s 2>&1 | FileCheck %s --check-prefix=UNI
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -passes='print<uniformity>' -disable-output %s 2>&1 | FileCheck %s --check-prefix=DIV
+
+; UNI: ALL VALUES UNIFORM
+; DIV: DIVERGENT: %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+; DIV: DIVERGENT: %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+define void @foo() {
+ %alloca = alloca i32, align 4, addrspace(5)
+ %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+ store i32 1, ptr %cast
+ %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+ store i32 2, ptr %cast.1
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/abds-neg.ll b/llvm/test/CodeGen/AArch64/abds-neg.ll
index 7524782..02c76ba 100644
--- a/llvm/test/CodeGen/AArch64/abds-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abds-neg.ll
@@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
@@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
diff --git a/llvm/test/CodeGen/AArch64/abds.ll b/llvm/test/CodeGen/AArch64/abds.ll
index bbdb116..bf52e71 100644
--- a/llvm/test/CodeGen/AArch64/abds.ll
+++ b/llvm/test/CodeGen/AArch64/abds.ll
@@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = sext i8 %a to i64
@@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
@@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = sext i16 %a to i64
@@ -215,8 +210,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_minmax_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
@@ -229,8 +223,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_minmax_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
@@ -287,8 +280,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_cmp_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp sgt i8 %a, %b
@@ -302,8 +294,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_cmp_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp sge i16 %a, %b
@@ -508,9 +499,8 @@ define i64 @vector_legalized(i16 %a, i16 %b) {
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: addp d0, v0.2d
-; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cneg w8, w8, mi
; CHECK-NEXT: fmov x9, d0
; CHECK-NEXT: add x0, x9, x8
@@ -533,8 +523,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_select_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp slt i8 %a, %b
@@ -548,8 +537,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_select_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: sub w8, w8, w1, sxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, sxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp sle i16 %a, %b
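
A note on the recurring rewrite in these AArch64 absolute-difference tests: subs is sub that also sets NZCV, so the old sub + cmp w8, #0 pair collapses into a single flag-setting instruction and the flag-consuming cneg is untouched. A hand sketch of the two variants, using standard AArch64 semantics (not output taken from the commit):

  subs w8, w8, w1, sxtb   // w8 = w8 - sxtb(w1), sets N/Z/C/V from the result
  cneg w0, w8, mi         // mi: negate if result < 0  => w0 = |a - b|  (abds.ll, abdu.ll)
  cneg w0, w8, pl         // pl: negate if result >= 0 => w0 = -|a - b| (abds-neg.ll, abdu-neg.ll)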
diff --git a/llvm/test/CodeGen/AArch64/abdu-neg.ll b/llvm/test/CodeGen/AArch64/abdu-neg.ll
index d07f099a..400031b 100644
--- a/llvm/test/CodeGen/AArch64/abdu-neg.ll
+++ b/llvm/test/CodeGen/AArch64/abdu-neg.ll
@@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -26,8 +25,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -43,8 +41,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -60,8 +57,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
@@ -93,8 +89,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, pl
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
diff --git a/llvm/test/CodeGen/AArch64/abdu.ll b/llvm/test/CodeGen/AArch64/abdu.ll
index 1045ee2..8d2b0b0 100644
--- a/llvm/test/CodeGen/AArch64/abdu.ll
+++ b/llvm/test/CodeGen/AArch64/abdu.ll
@@ -9,8 +9,7 @@ define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -25,8 +24,7 @@ define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -41,8 +39,7 @@ define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_ext_i8_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = zext i8 %a to i64
@@ -57,8 +54,7 @@ define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
@@ -88,8 +84,7 @@ define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_ext_i16_undef:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%aext = zext i16 %a to i64
@@ -219,8 +214,7 @@ define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_minmax_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%min = call i8 @llvm.umin.i8(i8 %a, i8 %b)
@@ -233,8 +227,7 @@ define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_minmax_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%min = call i16 @llvm.umin.i16(i16 %a, i16 %b)
@@ -293,8 +286,7 @@ define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_cmp_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp ugt i8 %a, %b
@@ -308,8 +300,7 @@ define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_cmp_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp uge i16 %a, %b
@@ -373,10 +364,9 @@ define i64 @vector_legalized(i16 %a, i16 %b) {
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: addp d0, v0.2d
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w8, w8, mi
+; CHECK-NEXT: addp d0, v0.2d
; CHECK-NEXT: fmov x9, d0
; CHECK-NEXT: add x0, x9, x8
; CHECK-NEXT: ret
@@ -398,8 +388,7 @@ define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
; CHECK-LABEL: abd_select_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: sub w8, w8, w1, uxtb
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxtb
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp ult i8 %a, %b
@@ -413,8 +402,7 @@ define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
; CHECK-LABEL: abd_select_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, w1, uxth
-; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: subs w8, w8, w1, uxth
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%cmp = icmp ule i16 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
index 3a808f5..dd018a6 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-array.ll
@@ -11,7 +11,7 @@ define void @array_1D(ptr %addr) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
@@ -34,7 +34,7 @@ define %my_subtype @array_1D_extract(ptr %addr) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #3
@@ -52,7 +52,7 @@ define void @array_1D_insert(ptr %addr, %my_subtype %elt) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-NEXT: ldr z2, [x0]
@@ -75,7 +75,7 @@ define void @array_2D(ptr %addr) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-6
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 48 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x30, 0x1e, 0x22 // sp + 16 + 48 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #5, mul vl]
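
The .cfi_escape strings in these checks are raw DWARF expressions; a hand decode of the new, shorter form (standard DWARF opcode values, shown for orientation only):

  0x0f             DW_CFA_def_cfa_expression
  0x08             expression length: 8 bytes
  0x8f 0x10        DW_OP_breg31 +16        -> sp + 16
  0x92 0x2e 0x00   DW_OP_bregx 0x2e +0     -> VG (AArch64 DWARF register 46)
  0x48             DW_OP_lit24
  0x1e             DW_OP_mul               -> 24 * VG
  0x22             DW_OP_plus              -> sp + 16 + 24 * VG

The old 12-byte form spelled the same value with DW_OP_consts (0x11) operands and a zero breg offset; folding the constant into the breg offset and using a DW_OP_lit saves four bytes per directive.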
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
index e7d8f4f..be73dc9 100644
--- a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -10,7 +10,7 @@ define void @test(ptr %addr) #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll
index 8bf2b82..c367057 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll
@@ -139,9 +139,8 @@ define <2 x ptr> @test_v2p0(<2 x ptr> %a, <2 x ptr> %b) {
define <16 x i8> @reverse_vector_s8x16b(<16 x i8> noundef %x) {
; CHECK-SD-LABEL: reverse_vector_s8x16b:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: rev64 v1.16b, v0.16b
-; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v0.16b, v0.16b
+; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: reverse_vector_s8x16b:
@@ -161,9 +160,8 @@ entry:
define <8 x i16> @reverse_vector_s16x8b(<8 x i16> noundef %x) {
; CHECK-SD-LABEL: reverse_vector_s16x8b:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: rev64 v1.8h, v0.8h
-; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v0.8h, v0.8h
+; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: reverse_vector_s16x8b:
diff --git a/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll
new file mode 100644
index 0000000..5036be9
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/csel-subs-dag-combine.ll
@@ -0,0 +1,112 @@
+; RUN: llc -debug-only=isel -o /dev/null < %s 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+; These tests ensure that we don't combine
+; CSEL a, b, cc, SUBS(SUB(x,y), 0) -> CSEL a, b, cc, SUBS(x,y)
+; if the flags set by SUBS(SUB(x,y), 0) have more than one use.
+;
+; This restriction exists because combining SUBS(SUB(x,y), 0) -> SUBS(x,y) is
+; only valid if there are no users of the overflow flags (C/V) generated by the
+; SUBS. Currently, we only check the flags used by the CSEL, and therefore we
+; conservatively reject cases where the SUBS's flags have other uses.
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs:'
+; CHECK-NEXT: SelectionDAG has 13 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t5: i32 = sub t2, t4
+; CHECK-NEXT: t14: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0>
+; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t14:1
+; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16
+; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1
+
+; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs:'
+; CHECK-NEXT: SelectionDAG has 11 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t18: i32,i32 = AArch64ISD::SUBS t2, t4
+; CHECK-NEXT: t16: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t18:1
+; CHECK-NEXT: t11: ch,glue = CopyToReg t0, Register:i32 $w0, t16
+; CHECK-NEXT: t12: ch = AArch64ISD::RET_GLUE t11, Register:i32 $w0, t11:1
+
+define i32 @combine_subs(i32 %a, i32 %b) {
+ %sub = sub i32 %a, %b
+ %cc = icmp ne i32 %sub, 0
+ %sel = select i1 %cc, i32 %a, i32 %b
+ ret i32 %sel
+}
+
+; CHECK-LABEL: Legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:'
+; CHECK-NEXT: SelectionDAG has 14 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t5: i32 = sub t2, t4
+; CHECK-NEXT: t15: i32,i32 = AArch64ISD::SUBS t5, Constant:i32<0>
+; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t15:1
+; CHECK-NEXT: t10: i32 = add t17, t5
+; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10
+; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1
+
+; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'combine_subs_multiple_sub_uses:'
+; CHECK-NEXT: SelectionDAG has 12 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t17: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t19:1
+; CHECK-NEXT: t10: i32 = add t17, t19
+; CHECK-NEXT: t12: ch,glue = CopyToReg t0, Register:i32 $w0, t10
+; CHECK-NEXT: t19: i32,i32 = AArch64ISD::SUBS t2, t4
+; CHECK-NEXT: t13: ch = AArch64ISD::RET_GLUE t12, Register:i32 $w0, t12:1
+
+define i32 @combine_subs_multiple_sub_uses(i32 %a, i32 %b) {
+ %sub = sub i32 %a, %b
+ %cc = icmp ne i32 %sub, 0
+ %sel = select i1 %cc, i32 %a, i32 %b
+ %add = add i32 %sel, %sub
+ ret i32 %add
+}
+
+; CHECK-LABEL: Legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:'
+; CHECK-NEXT: SelectionDAG has 19 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1
+; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2
+; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3
+; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1
+; CHECK-NEXT: t15: i32 = add t24, t23
+; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15
+; CHECK-NEXT: t9: i32 = sub t2, t4
+; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0>
+; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1
+
+; CHECK-LABEL: Optimized legalized selection DAG: %bb.0 'do_not_combine_subs_multiple_flag_uses:'
+; CHECK-NEXT: SelectionDAG has 19 nodes:
+; CHECK-NEXT: t0: ch,glue = EntryToken
+; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %0
+; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %1
+; CHECK-NEXT: t24: i32 = AArch64ISD::CSEL t2, t4, Constant:i32<1>, t21:1
+; CHECK-NEXT: t6: i32,ch = CopyFromReg t0, Register:i32 %2
+; CHECK-NEXT: t8: i32,ch = CopyFromReg t0, Register:i32 %3
+; CHECK-NEXT: t23: i32 = AArch64ISD::CSEL t6, t8, Constant:i32<1>, t21:1
+; CHECK-NEXT: t15: i32 = add t24, t23
+; CHECK-NEXT: t17: ch,glue = CopyToReg t0, Register:i32 $w0, t15
+; CHECK-NEXT: t9: i32 = sub t2, t4
+; CHECK-NEXT: t21: i32,i32 = AArch64ISD::SUBS t9, Constant:i32<0>
+; CHECK-NEXT: t18: ch = AArch64ISD::RET_GLUE t17, Register:i32 $w0, t17:1
+
+define i32 @do_not_combine_subs_multiple_flag_uses(i32 %a, i32 %b, i32 %c, i32 %d) {
+ %sub = sub i32 %a, %b
+ %cc = icmp ne i32 %sub, 0
+ %sel = select i1 %cc, i32 %a, i32 %b
+ %other = select i1 %cc, i32 %c, i32 %d
+ %add = add i32 %sel, %other
+ ret i32 %add
+}
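+
+; A concrete case of the hazard described above (a worked example, not taken
+; from the commit): with a = INT32_MIN and b = 1, SUBS(a, b) overflows and
+; sets V, while the unfolded SUBS(a - b, 0) = SUBS(0x7fffffff, 0) leaves V
+; clear; C differs as well, since comparing against 0 always sets C. N and Z
+; agree because both forms produce the same result value, so the fold is safe
+; when, as in the first two functions, only the CSEL's ne condition reads the
+; flags; the combine inspects only that one CSEL, so it conservatively bails
+; out whenever the flags have any other user, as in the third function.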
diff --git a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll
index d1e0729..6a91d85 100644
--- a/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll
+++ b/llvm/test/CodeGen/AArch64/fp8-sme2-cvtn.ll
@@ -11,10 +11,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtn_f16_tuple(i64 %stride, p
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1]
@@ -52,10 +52,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @cvtnt_f32_tuple(i64 %stride,
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: mov z1.d, z0.d
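
Likewise for the callee-save expressions just above, the new $d10 escape decodes as (standard DWARF values, hand-decoded for orientation):

  0x10             DW_CFA_expression
  0x4a             register 74 ($d10)
  0x09             expression length: 9 bytes
  0x92 0x2e 0x00   DW_OP_bregx 0x2e +0      -> push VG
  0x11 0x78        DW_OP_consts -8
  0x1e 0x22        DW_OP_mul, DW_OP_plus    -> CFA - 8 * VG
  0x40 0x1c        DW_OP_lit16, DW_OP_minus -> CFA - 8 * VG - 16

For DW_CFA_expression the CFA is the implicit first stack entry, which is why the comment reads $d10 @ cfa - 8 * VG - 16.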
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index aed3145..e970d83 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -9,16 +9,16 @@
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-2
- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
- ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+ ; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-NEXT: addvl sp, sp, #-1
- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: // implicit-def: $z8
; CHECK-NEXT: // implicit-def: $p4
; CHECK-NEXT: addvl sp, sp, #1
- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 17b1ad2..03a6aab 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -64,7 +64,7 @@
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32
@@ -79,7 +79,8 @@
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
# ASM: .cfi_def_cfa_offset 32
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 16 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 16 * VG
# ASM: .cfi_def_cfa wsp, 32
# ASM: .cfi_def_cfa_offset 16
# ASM: .cfi_def_cfa_offset 0
@@ -88,8 +89,8 @@
#
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_offset: +32
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_offset: +32
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +32
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO: DW_CFA_def_cfa_offset: +0
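
Evaluating the old and new DW_CFA_def_cfa_expression forms above as a DWARF stack machine shows they compute the same CFA (a hand walk-through, not tool output):

  old: DW_OP_breg31 +0                   -> sp
       DW_OP_consts +32, DW_OP_plus      -> sp + 32
       DW_OP_consts +16, DW_OP_bregx 0x2e +0,
       DW_OP_mul, DW_OP_plus             -> sp + 32 + 16 * VG

  new: DW_OP_breg31 +32                  -> sp + 32
       DW_OP_bregx 0x2e +0, DW_OP_lit16,
       DW_OP_mul, DW_OP_plus             -> sp + 32 + 16 * VG

Only the encoding shrinks; the unwind semantics are unchanged, which is why these tests can relax to matching a bare .cfi_escape plus its // sp + ... comment.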
@@ -129,7 +130,7 @@ body: |
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 48
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
#
# CHECK-NEXT: $x20 = IMPLICIT_DEF
@@ -152,7 +153,8 @@ body: |
# ASM-NEXT: .cfi_offset w21, -16
# ASM-NEXT: .cfi_offset w29, -32
# ASM: .cfi_def_cfa_offset 48
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 48 + 16 * VG
#
# ASM: .cfi_def_cfa wsp, 48
# ASM: .cfi_def_cfa_offset 32
@@ -166,9 +168,8 @@ body: |
# UNWINDINFO: DW_CFA_offset: reg20 -8
# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
-# UNWINDINFO: DW_CFA_def_cfa_offset: +48
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-#
+# UNWINDINFO: DW_CFA_def_cfa_offset: +48
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit16, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +48
# UNWINDINFO: DW_CFA_def_cfa_offset: +32
# UNWINDINFO: DW_CFA_def_cfa_offset: +0
@@ -272,7 +273,7 @@ body: |
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 16
# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 2
@@ -295,7 +296,8 @@ body: |
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
# ASM: .cfi_def_cfa_offset 32
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 24 * VG
#
# ASM: .cfi_def_cfa wsp, 32
# ASM: .cfi_def_cfa_offset 16
@@ -305,7 +307,7 @@ body: |
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
# UNWINDINFO: DW_CFA_def_cfa_offset: +32
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
#
# UNWINDINFO: DW_CFA_def_cfa: reg31 +32
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
@@ -434,7 +436,7 @@ body: |
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 16, 0
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 32
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $[[TMP:x[0-9]+]] = ADDVL_XXI $sp, 1
# CHECK-NEXT: $x0 = LDRXui killed $[[TMP]], 4
@@ -451,7 +453,8 @@ body: |
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
# ASM: .cfi_def_cfa_offset 32
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 8 * VG
#
# ASM: .cfi_def_cfa wsp, 32
# ASM: .cfi_def_cfa_offset 16
@@ -461,7 +464,7 @@ body: |
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
# UNWINDINFO: DW_CFA_def_cfa_offset: +32
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
#
# UNWINDINFO: DW_CFA_def_cfa: reg31 +32
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
@@ -504,23 +507,23 @@ body: |
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $[[TMP2:x[0-9]+]] = ADDVL_XXI $sp, 1
# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP2]], 255
@@ -529,21 +532,21 @@ body: |
# CHECK-NEXT: STR_PXI $p0, killed $[[TMP2]], 255
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 9
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
# CHECK-NEXT: $sp, $[[SCRATCH]] = frame-destroy LDRXpost $sp, 16
@@ -554,48 +557,65 @@ body: |
# ASM-LABEL: test_address_sve_out_of_range:
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2056 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 256 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 512 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 768 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1024 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1280 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1536 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1792 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 2048 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 2056 * VG
#
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1808 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1560 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1312 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 1064 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 816 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 568 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 320 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 72 * VG
# ASM: .cfi_def_cfa wsp, 16
# ASM: .cfi_def_cfa_offset 0
# ASM-NEXT: .cfi_restore w29
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +256, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +512, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +768, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1024, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1280, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1536, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1792, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2048, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +2056, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +256, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +512, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +768, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1024, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1280, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1536, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1792, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2048, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +2056, DW_OP_mul, DW_OP_plus
#
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1808, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1560, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1312, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +1064, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +816, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +568, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +320, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +72, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1808, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1560, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1312, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +1064, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +816, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +568, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +320, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +72, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
# UNWINDINFO: DW_CFA_def_cfa_offset: +0
# UNWINDINFO-NEXT: DW_CFA_restore: reg29
@@ -702,15 +722,15 @@ body: |
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
# CHECK: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: frame-setup STR_PXI killed $p6, $sp, 5
# CHECK: frame-setup STR_PXI killed $p5, $sp, 6
# CHECK: frame-setup STR_PXI killed $p4, $sp, 7
# CHECK: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK: $p6 = frame-destroy LDR_PXI $sp, 5
# CHECK: $p5 = frame-destroy LDR_PXI $sp, 6
# CHECK: $p4 = frame-destroy LDR_PXI $sp, 7
@@ -725,20 +745,23 @@ body: |
# ASM-LABEL: save_restore_pregs_sve:
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 8 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 8 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 48 + 8 * VG
#
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 8 * VG
# ASM: .cfi_def_cfa wsp, 16
# ASM: .cfi_def_cfa_offset 0
# ASM-NEXT: .cfi_restore w29
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
#
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
# UNWINDINFO: DW_CFA_def_cfa_offset: +0
# UNWINDINFO-NEXT: DW_CFA_restore: reg29
@@ -761,18 +784,18 @@ body: |
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0
# CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1
# CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1
# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
@@ -789,13 +812,19 @@ body: |
# ASM-LABEL: save_restore_zregs_sve:
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // $d8 @ cfa - 8 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d9 @ cfa - 16 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d10 @ cfa - 24 * VG - 16
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 48 + 24 * VG
#
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 24 * VG
# ASM: .cfi_def_cfa wsp, 16
# ASM-NEXT: .cfi_restore z8
# ASM-NEXT: .cfi_restore z9
@@ -805,13 +834,13 @@ body: |
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +48, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +48, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
#
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
@@ -848,7 +877,7 @@ body: |
# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -32
# CHECK: $sp = frame-setup ADDVL_XXI $sp, -18
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: frame-setup STR_PXI killed $p15, $sp, 4
# CHECK: frame-setup STR_PXI killed $p14, $sp, 5
# CHECK: frame-setup STR_PXI killed $p5, $sp, 14
@@ -857,23 +886,23 @@ body: |
# CHECK: frame-setup STR_ZXI killed $z22, $sp, 3
# CHECK: frame-setup STR_ZXI killed $z9, $sp, 16
# CHECK: frame-setup STR_ZXI killed $z8, $sp, 17
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-setup SUBXri $sp, 32, 0
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-setup ADDVL_XXI $sp, -1
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2
# CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3
# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16
@@ -909,20 +938,33 @@ body: |
# ASM-NEXT: .cfi_offset w20, -16
# ASM-NEXT: .cfi_offset w21, -24
# ASM-NEXT: .cfi_offset w29, -32
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
-# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
-# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 144 * VG
-# ASM: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 64 + 152 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 144 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // $d8 @ cfa - 8 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d9 @ cfa - 16 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d10 @ cfa - 24 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d11 @ cfa - 32 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d12 @ cfa - 40 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d13 @ cfa - 48 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d14 @ cfa - 56 * VG - 32
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d15 @ cfa - 64 * VG - 32
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 64 + 144 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 64 + 152 * VG
#
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 152 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 32 + 144 * VG
# ASM: .cfi_def_cfa wsp, 32
# ASM-NEXT: .cfi_restore z8
# ASM-NEXT: .cfi_restore z9
@@ -943,20 +985,20 @@ body: |
# UNWINDINFO-NEXT: DW_CFA_offset: reg20 -16
# UNWINDINFO-NEXT: DW_CFA_offset: reg21 -24
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -32
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -32, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +64, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_consts -32, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +64, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus
#
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +152, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +32, DW_OP_plus, DW_OP_consts +144, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +152, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +32, DW_OP_bregx 0x2e +0, DW_OP_consts +144, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +32
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
@@ -1025,14 +1067,14 @@ body: |
# CHECK-NEXT: STR_ZXI killed $z22, $sp, 3
# CHECK: STR_ZXI killed $z9, $sp, 16
# CHECK-NEXT: STR_ZXI killed $z8, $sp, 17
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 16, 0
# CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -1
# CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]]
@@ -1067,14 +1109,22 @@ body: |
# ASM: .cfi_def_cfa w29, 16
# ASM-NEXT: .cfi_offset w30, -8
# ASM-NEXT: .cfi_offset w29, -16
-# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-# ASM-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // $d8 @ cfa - 8 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d9 @ cfa - 16 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d10 @ cfa - 24 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d11 @ cfa - 32 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d12 @ cfa - 40 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d13 @ cfa - 48 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d14 @ cfa - 56 * VG - 16
+# ASM-NEXT: .cfi_escape
+# ASM-SAME: // $d15 @ cfa - 64 * VG - 16
#
# ASM: .cfi_restore z8
# ASM-NEXT: .cfi_restore z9
@@ -1093,14 +1143,14 @@ body: |
# UNWINDINFO: DW_CFA_def_cfa: reg29 +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -16, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -32, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -40, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -48, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -56, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -64, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg75 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg76 DW_OP_bregx 0x2e +0, DW_OP_consts -40, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg77 DW_OP_bregx 0x2e +0, DW_OP_consts -48, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg78 DW_OP_bregx 0x2e +0, DW_OP_consts -56, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO-NEXT: DW_CFA_expression: reg79 DW_OP_bregx 0x2e +0, DW_OP_consts -64, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
#
# UNWINDINFO: DW_CFA_restore_extended: reg104
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
@@ -1188,17 +1238,17 @@ body: |
# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: STR_PXI killed $p15, $sp, 6
# CHECK-NEXT: STR_PXI killed $p4, $sp, 7
# CHECK-NEXT: STR_ZXI killed $z23, $sp, 1
# CHECK-NEXT: STR_ZXI killed $z8, $sp, 2
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -7
-# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7
-# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape
# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
@@ -1214,11 +1264,15 @@ body: |
# ASM-LABEL: frame_layout:
# ASM: .cfi_def_cfa_offset 16
# ASM-NEXT: .cfi_offset w29, -16
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
-# ASM: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-# ASM: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 80 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // $d8 @ cfa - 8 * VG - 16
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 80 * VG
#
-# ASM: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+# ASM: .cfi_escape
+# ASM-SAME: // sp + 16 + 24 * VG
# ASM: .cfi_def_cfa wsp, 16
# ASM-NEXT: .cfi_restore z8
# ASM: .cfi_def_cfa_offset 0
@@ -1226,11 +1280,11 @@ body: |
# UNWINDINFO: DW_CFA_def_cfa_offset: +16
# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_consts -16, DW_OP_plus, DW_OP_consts -8, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +80, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -8, DW_OP_mul, DW_OP_plus, DW_OP_lit16, DW_OP_minus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +80, DW_OP_mul, DW_OP_plus
#
-# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +0, DW_OP_consts +16, DW_OP_plus, DW_OP_consts +24, DW_OP_bregx 0x2e +0, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
# UNWINDINFO: DW_CFA_def_cfa_offset: +0
diff --git a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll
index 2cf8621..474a9d1 100644
--- a/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsic-vector-match-sve2.ll
@@ -36,7 +36,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v4i8(<vscale x 16 x i8> %op1, <4 x i8>
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-NEXT: umov w8, v1.h[1]
@@ -241,7 +241,7 @@ define <vscale x 16 x i1> @match_nxv16i8_v32i8(<vscale x 16 x i8> %op1, <32 x i8
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: mov z3.b, z1.b[1]
@@ -463,7 +463,7 @@ define <vscale x 4 x i1> @match_nxv4xi32_v4i32(<vscale x 4 x i32> %op1, <4 x i32
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
; CHECK-NEXT: mov z2.s, z1.s[1]
diff --git a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll
index 2d30167..59e1cba 100644
--- a/llvm/test/CodeGen/AArch64/luti-with-sme2.ll
+++ b/llvm/test/CodeGen/AArch64/luti-with-sme2.ll
@@ -9,10 +9,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @test_luti4_lane_i16_x2_tuple(
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1]
@@ -50,10 +50,10 @@ define { <vscale x 8 x half>, <vscale x 8 x half> } @test_luti4_lane_f16_x2_tupl
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1]
@@ -91,10 +91,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @test_luti4_lane_bf16_x2
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1]
diff --git a/llvm/test/CodeGen/AArch64/midpoint-int.ll b/llvm/test/CodeGen/AArch64/midpoint-int.ll
index 15c1dff..79bba53 100644
--- a/llvm/test/CodeGen/AArch64/midpoint-int.ll
+++ b/llvm/test/CodeGen/AArch64/midpoint-int.ll
@@ -255,12 +255,11 @@ define i64 @scalar_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
; CHECK-LABEL: scalar_i16_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sxth w9, w1
-; CHECK-NEXT: sxth w10, w0
+; CHECK-NEXT: sxth w9, w0
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w9, w9, w1, sxth
; CHECK-NEXT: cneg w8, w8, le
+; CHECK-NEXT: cneg w9, w9, mi
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -278,12 +277,11 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
; CHECK-LABEL: scalar_i16_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: and w9, w1, #0xffff
-; CHECK-NEXT: and w10, w0, #0xffff
+; CHECK-NEXT: and w9, w0, #0xffff
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w9, w9, w1, uxth
; CHECK-NEXT: cneg w8, w8, ls
+; CHECK-NEXT: cneg w9, w9, mi
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -303,14 +301,13 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind {
; CHECK-LABEL: scalar_i16_signed_mem_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sxth w9, w1
-; CHECK-NEXT: ldrsh w10, [x0]
+; CHECK-NEXT: ldrsh w9, [x0]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w10, w9, w1, sxth
; CHECK-NEXT: cneg w8, w8, le
-; CHECK-NEXT: lsr w9, w9, #1
-; CHECK-NEXT: madd w0, w9, w8, w10
+; CHECK-NEXT: cneg w10, w10, mi
+; CHECK-NEXT: lsr w10, w10, #1
+; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
%a1 = load i16, ptr %a1_addr
%t3 = icmp sgt i16 %a1, %a2 ; signed
@@ -382,12 +379,11 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind {
; CHECK-LABEL: scalar_i8_signed_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w9, w1
-; CHECK-NEXT: sxtb w10, w0
+; CHECK-NEXT: sxtb w9, w0
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w9, w9, w1, sxtb
; CHECK-NEXT: cneg w8, w8, le
+; CHECK-NEXT: cneg w9, w9, mi
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -405,12 +401,11 @@ define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind {
define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
; CHECK-LABEL: scalar_i8_unsigned_reg_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: and w9, w1, #0xff
-; CHECK-NEXT: and w10, w0, #0xff
+; CHECK-NEXT: and w9, w0, #0xff
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w9, w9, w1, uxtb
; CHECK-NEXT: cneg w8, w8, ls
+; CHECK-NEXT: cneg w9, w9, mi
; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: madd w0, w9, w8, w0
; CHECK-NEXT: ret
@@ -430,14 +425,13 @@ define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
define i8 @scalar_i8_signed_mem_reg(ptr %a1_addr, i8 %a2) nounwind {
; CHECK-LABEL: scalar_i8_signed_mem_reg:
; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w9, w1
-; CHECK-NEXT: ldrsb w10, [x0]
+; CHECK-NEXT: ldrsb w9, [x0]
; CHECK-NEXT: mov w8, #-1 // =0xffffffff
-; CHECK-NEXT: subs w9, w10, w9
-; CHECK-NEXT: cneg w9, w9, mi
+; CHECK-NEXT: subs w10, w9, w1, sxtb
; CHECK-NEXT: cneg w8, w8, le
-; CHECK-NEXT: lsr w9, w9, #1
-; CHECK-NEXT: madd w0, w9, w8, w10
+; CHECK-NEXT: cneg w10, w10, mi
+; CHECK-NEXT: lsr w10, w10, #1
+; CHECK-NEXT: madd w0, w10, w8, w9
; CHECK-NEXT: ret
%a1 = load i8, ptr %a1_addr
%t3 = icmp sgt i8 %a1, %a2 ; signed
diff --git a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll
index 7b55c69..1ceb25b 100644
--- a/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll
+++ b/llvm/test/CodeGen/AArch64/perm-tb-with-sme2.ll
@@ -13,10 +13,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @tbl2_b_tuple(i64 %stride, ptr
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: ld1b { z3.b, z11.b }, pn8/z, [x1]
; CHECK-NEXT: ld1b { z4.b, z12.b }, pn8/z, [x1, x0]
@@ -53,10 +53,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @tbl2_h_tuple(i64 %stride, ptr
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1]
@@ -94,10 +94,10 @@ define { <vscale x 4 x i32>, <vscale x 4 x i32> } @tbl2_s_tuple(i64 %stride, ptr
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1]
@@ -135,10 +135,10 @@ define { <vscale x 2 x i64>, <vscale x 2 x i64> } @tbl2_d_tuple(i64 %stride, ptr
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1]
@@ -176,10 +176,10 @@ define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @tbl2_bf16_tuple(i64 %st
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z3.h, z11.h }, pn8/z, [x1]
@@ -217,10 +217,10 @@ define { <vscale x 4 x float>, <vscale x 4 x float> } @tbl2_f32_tuple(i64 %strid
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1w { z3.s, z11.s }, pn8/z, [x1]
@@ -258,10 +258,10 @@ define { <vscale x 2 x double>, <vscale x 2 x double> } @tbl2_f64_tuple(i64 %str
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z12, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1d { z3.d, z11.d }, pn8/z, [x1]
diff --git a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll
index 0853325..6fcfc5b 100644
--- a/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll
+++ b/llvm/test/CodeGen/AArch64/sme-vg-to-stack.ll
@@ -328,7 +328,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 {
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
; CHECK-NEXT: addvl sp, sp, #-18
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG
; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
@@ -351,16 +351,16 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 {
; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 152 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x01, 0x1e, 0x22 // sp + 32 + 152 * VG
; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
@@ -371,7 +371,7 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 {
; CHECK-NEXT: smstart sm
; CHECK-NEXT: .cfi_restore vg
; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 32 + 144 * VG
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
@@ -448,14 +448,14 @@ define void @vg_unwind_with_sve_args(<vscale x 2 x i64> %x) #0 {
; FP-CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
; FP-CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; FP-CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG
-; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48
+; FP-CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48
; FP-CHECK-NEXT: addvl sp, sp, #-1
; FP-CHECK-NEXT: str z0, [x29, #-19, mul vl] // 16-byte Folded Spill
; FP-CHECK-NEXT: //APP
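
The .cfi_escape changes in this diff follow one pattern: the byte string shrinks because the rewritten DWARF expression folds the fixed offset directly into DW_OP_breg31 (or uses a DW_OP_lit0..31 literal) and emits the scalable VG term first. The comments on each line already give the decoded form; as a sanity check, the following minimal sketch (plain Python, not an LLVM utility; opcode values per DWARF 5, register 46 taken to be VG per the AAPCS64 numbering, and sleb128/decode_expr purely illustrative helpers) decodes the old and new expression bodies of the "sp + 32 + 144 * VG" rule from the hunk above:

    # Minimal sketch, not part of LLVM: decode the expression bytes that
    # follow "0x0f, <len>" in a DW_CFA_def_cfa_expression .cfi_escape line.
    # Opcode values are from DWARF 5; register 46 is assumed to be VG.

    def sleb128(data, i):
        # Decode one signed LEB128 value starting at data[i].
        result, shift = 0, 0
        while True:
            byte = data[i]; i += 1
            result |= (byte & 0x7F) << shift
            shift += 7
            if not byte & 0x80:
                if byte & 0x40:              # sign-extend from the last byte
                    result -= 1 << shift
                return result, i

    def decode_expr(data):
        # Render the small opcode subset these tests use as RPN tokens.
        out, i = [], 0
        while i < len(data):
            op = data[i]; i += 1
            if op == 0x8F:                   # DW_OP_breg31 (sp) + SLEB offset
                off, i = sleb128(data, i); out.append(f"sp{off:+d}")
            elif op == 0x92:                 # DW_OP_bregx: ULEB reg, SLEB off
                reg = data[i]; i += 1        # registers here fit in one byte
                off, i = sleb128(data, i)
                out.append("VG" if reg == 46 else f"r{reg}{off:+d}")
            elif op == 0x11:                 # DW_OP_consts (SLEB)
                val, i = sleb128(data, i); out.append(str(val))
            elif 0x30 <= op <= 0x4F:         # DW_OP_lit0 .. DW_OP_lit31
                out.append(str(op - 0x30))
            elif op == 0x1E: out.append("mul")
            elif op == 0x22: out.append("plus")
            elif op == 0x1C: out.append("minus")
            else: raise ValueError(f"unhandled opcode {op:#x}")
        return " ".join(out)

    # "sp + 32 + 144 * VG": old (13-byte) vs new (10-byte) expression body.
    old = bytes([0x8F, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01,
                 0x92, 0x2E, 0x00, 0x1E, 0x22])
    new = bytes([0x8F, 0x20, 0x92, 0x2E, 0x00, 0x11, 0x90, 0x01, 0x1E, 0x22])
    print(decode_expr(old))  # sp+0 32 plus 144 VG mul plus
    print(decode_expr(new))  # sp+32 VG 144 mul plus

Both bodies evaluate to sp + 32 + 144 * VG; only the operand order and encoding width change, which is why the updated length byte after 0x0f drops from 0x0d to 0x0a while the decoded comment stays the same.
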
diff --git a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll
index b0390ec..8398e07 100644
--- a/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-fp8-intrinsics-cvt.ll
@@ -36,7 +36,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: lsl x8, x0, #1
; CHECK-NEXT: add x9, x1, x0
@@ -129,10 +129,10 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8> } @bfcvt_tuple(i64 %stride, ptr
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1h { z2.h, z10.h }, pn8/z, [x1]
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll
index b4a83c1..58d2e25 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qcvt.ll
@@ -58,7 +58,7 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8
; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: lsl x8, x0, #1
; CHECK-NEXT: add x9, x1, x0
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll
index 0bc9e15..3bb516d 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-qrshr.ll
@@ -24,10 +24,10 @@ define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vector_sat_shift_narrow
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z11, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: ld1w { z2.s, z10.s }, pn8/z, [x1]
@@ -98,7 +98,7 @@ define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 1
; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: lsl x8, x0, #1
; CHECK-NEXT: add x9, x1, x0
diff --git a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
index 1d04cc6..c3338b1 100644
--- a/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
+++ b/llvm/test/CodeGen/AArch64/sme2-multivec-regalloc.mir
@@ -17,7 +17,7 @@ body: |
; CHECK-NEXT: stp d9, d8, [sp, #16]
; CHECK-NEXT: str x29, [sp, #32]
; CHECK-NEXT: addvl sp, sp, #-2
- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 16 * VG
+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 48 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: .cfi_offset b8, -24
; CHECK-NEXT: .cfi_offset b9, -32
@@ -97,7 +97,7 @@ body: |
; CHECK: str x29, [sp, #-16]!
; CHECK-NEXT: addvl sp, sp, #-2
- ; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+ ; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: lsl x9, x1, #1
; CHECK-NEXT: ptrue pn8.b
diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
index 555e38a..109059e 100644
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -16,7 +16,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16
; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16
; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3
-; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-LEGALIZATION-NEXT: cntd x8
; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2
; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2
@@ -59,7 +59,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: cntd x8
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: mov w9, #2 // =0x2
@@ -111,7 +111,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
; CHECK-LEGALIZATION-NEXT: .cfi_def_cfa_offset 16
; CHECK-LEGALIZATION-NEXT: .cfi_offset w29, -16
; CHECK-LEGALIZATION-NEXT: addvl sp, sp, #-3
-; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-LEGALIZATION-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-LEGALIZATION-NEXT: cntd x8
; CHECK-LEGALIZATION-NEXT: ptrue p0.d, vl2
; CHECK-LEGALIZATION-NEXT: mov w9, #2 // =0x2
@@ -154,7 +154,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: cntd x8
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: mov w9, #2 // =0x2
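
The register-save rules in the following hunks change the same way. DW_CFA_expression (0x10) is followed by a ULEB register number (0x48 is d8 in the AArch64 numbering, where d0 is register 64) and a length, and the expression is evaluated with the CFA pushed as the initial stack entry. A small evaluator sketch (again illustrative Python under the same assumptions; eval_expr is a made-up helper, and its single-byte SLEB shortcut only covers the constants these tests emit) confirms that the old "cfa - 16 - 8 * VG" and new "cfa - 8 * VG - 16" bodies for $d8 describe the same save slot:

    # Minimal sketch, not LLVM code: numerically compare the old and new
    # DW_CFA_expression bodies for "$d8 @ cfa - 16 - 8 * VG". The stack
    # starts as [cfa]; register 46 is assumed to be VG, as above.

    def eval_expr(data, cfa, vg):
        stack, i = [cfa], 0
        while i < len(data):
            op = data[i]; i += 1
            if op == 0x92 and data[i] == 0x2E:  # DW_OP_bregx VG + 0
                stack.append(vg); i += 2        # skip reg byte and 0 offset
            elif op == 0x11:                    # DW_OP_consts, one SLEB byte
                b = data[i]; i += 1
                stack.append(b - 0x80 if b & 0x40 else b)
            elif 0x30 <= op <= 0x4F:            # DW_OP_lit0 .. DW_OP_lit31
                stack.append(op - 0x30)
            elif op == 0x1E:
                b, a = stack.pop(), stack.pop(); stack.append(a * b)
            elif op == 0x22:
                b, a = stack.pop(), stack.pop(); stack.append(a + b)
            elif op == 0x1C:
                b, a = stack.pop(), stack.pop(); stack.append(a - b)
        return stack[-1]

    # Expression bytes after "0x10, 0x48, <len>" in the check lines below:
    old = bytes([0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2E, 0x00, 0x1E, 0x22])
    new = bytes([0x92, 0x2E, 0x00, 0x11, 0x78, 0x1E, 0x22, 0x40, 0x1C])
    for vg in (2, 4, 8, 16):                    # VG = 64-bit units per vector
        assert eval_expr(old, 4096, vg) == eval_expr(new, 4096, vg)

Emitting the VG product first lets the trailing fixed offset use a short DW_OP_lit/DW_OP_minus pair instead of DW_OP_consts/DW_OP_plus, which is why the comment order flips from "- 16 - 8 * VG" to "- 8 * VG - 16" on each rewritten line.
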
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 3a33405..4615b1a 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -388,7 +388,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" {
; CHECK0-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
; CHECK0-NEXT: str x29, [sp, #8] // 8-byte Folded Spill
; CHECK0-NEXT: addvl sp, sp, #-1
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK0-NEXT: .cfi_offset w29, -8
; CHECK0-NEXT: .cfi_offset b8, -16
; CHECK0-NEXT: mov z0.s, #0 // =0x0
@@ -407,7 +407,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" {
; CHECK64-NEXT: str x29, [sp, #72] // 8-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #64
; CHECK64-NEXT: addvl sp, sp, #-1
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG
; CHECK64-NEXT: .cfi_offset w29, -8
; CHECK64-NEXT: .cfi_offset b8, -80
; CHECK64-NEXT: mov z0.s, #0 // =0x0
@@ -429,7 +429,7 @@ define i32 @csr_d8_allocnxv4i32(i64 %d) "aarch64_pstate_sm_compatible" {
; CHECK1024-NEXT: str x29, [sp, #1032] // 8-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1024
; CHECK1024-NEXT: addvl sp, sp, #-1
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG
; CHECK1024-NEXT: .cfi_offset w29, -8
; CHECK1024-NEXT: .cfi_offset b8, -1040
; CHECK1024-NEXT: mov z0.s, #0 // =0x0
@@ -955,9 +955,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK0-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK0-NEXT: addvl sp, sp, #-1
; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK0-NEXT: .cfi_offset w29, -16
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
; CHECK0-NEXT: mov w0, wzr
@@ -973,9 +973,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK64-NEXT: addvl sp, sp, #-1
; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #64
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 144 + 8 * VG
; CHECK64-NEXT: .cfi_offset w29, -16
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80
; CHECK64-NEXT: mov w0, wzr
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
@@ -993,9 +993,9 @@ define i32 @svecc_csr_d8(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK1024-NEXT: addvl sp, sp, #-1
; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2064 + 8 * VG
; CHECK1024-NEXT: .cfi_offset w29, -16
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040
; CHECK1024-NEXT: mov w0, wzr
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
@@ -1017,10 +1017,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps
; CHECK0-NEXT: addvl sp, sp, #-2
; CHECK0-NEXT: str z9, [sp] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK0-NEXT: .cfi_offset w29, -16
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
; CHECK0-NEXT: mov w0, wzr
@@ -1038,10 +1038,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps
; CHECK64-NEXT: str z9, [sp] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #64
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG
; CHECK64-NEXT: .cfi_offset w29, -16
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 80 - 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 80
; CHECK64-NEXT: mov w0, wzr
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
@@ -1061,10 +1061,10 @@ define i32 @svecc_csr_d8d9(i32 noundef %num, <vscale x 4 x i32> %vs) "aarch64_ps
; CHECK1024-NEXT: str z9, [sp] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG
; CHECK1024-NEXT: .cfi_offset w29, -16
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1040 - 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1040
; CHECK1024-NEXT: mov w0, wzr
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
@@ -1086,9 +1086,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK0-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK0-NEXT: addvl sp, sp, #-1
; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK0-NEXT: .cfi_offset w29, -16
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
; CHECK0-NEXT: addvl x8, sp, #1
@@ -1106,9 +1106,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK64-NEXT: addvl sp, sp, #-1
; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #80
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG
; CHECK64-NEXT: .cfi_offset w29, -16
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80
; CHECK64-NEXT: mov w0, wzr
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
@@ -1127,9 +1127,9 @@ define i32 @svecc_csr_d8_allocd(double %d, <vscale x 4 x i32> %vs) "aarch64_psta
; CHECK1024-NEXT: addvl sp, sp, #-1
; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1040
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
; CHECK1024-NEXT: .cfi_offset w29, -16
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040
; CHECK1024-NEXT: mov w0, wzr
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
@@ -1153,9 +1153,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat
; CHECK0-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK0-NEXT: addvl sp, sp, #-1
; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK0-NEXT: .cfi_offset w29, -16
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
; CHECK0-NEXT: mov x8, x0
@@ -1174,9 +1174,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat
; CHECK64-NEXT: addvl sp, sp, #-1
; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #80
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 160 + 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 160 + 8 * VG
; CHECK64-NEXT: .cfi_offset w29, -16
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80
; CHECK64-NEXT: mov x8, x0
; CHECK64-NEXT: mov w0, wzr
; CHECK64-NEXT: //APP
@@ -1196,9 +1196,9 @@ define i32 @svecc_csr_d8_alloci64(i64 %d, <vscale x 4 x i32> %vs) "aarch64_pstat
; CHECK1024-NEXT: addvl sp, sp, #-1
; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1040
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0xa0, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2080 + 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
; CHECK1024-NEXT: .cfi_offset w29, -16
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040
; CHECK1024-NEXT: mov x8, x0
; CHECK1024-NEXT: mov w0, wzr
; CHECK1024-NEXT: //APP
@@ -1224,9 +1224,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p
; CHECK0-NEXT: addvl sp, sp, #-1
; CHECK0-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK0-NEXT: addvl sp, sp, #-1
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK0-NEXT: .cfi_offset w29, -16
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK0-NEXT: mov z0.s, #0 // =0x0
; CHECK0-NEXT: mov w0, wzr
; CHECK0-NEXT: //APP
@@ -1246,9 +1246,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p
; CHECK64-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #64
; CHECK64-NEXT: addvl sp, sp, #-1
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 144 + 16 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 144 + 16 * VG
; CHECK64-NEXT: .cfi_offset w29, -16
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xb0, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 80 - 8 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xb0, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 80
; CHECK64-NEXT: mov z0.s, #0 // =0x0
; CHECK64-NEXT: add x8, sp, #64
; CHECK64-NEXT: mov w0, wzr
@@ -1271,9 +1271,9 @@ define i32 @svecc_csr_d8_allocnxv4i32(i64 %d, <vscale x 4 x i32> %vs) "aarch64_p
; CHECK1024-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1024
; CHECK1024-NEXT: addvl sp, sp, #-1
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x90, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2064 + 16 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG
; CHECK1024-NEXT: .cfi_offset w29, -16
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xf0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1040 - 8 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1040
; CHECK1024-NEXT: mov z0.s, #0 // =0x0
; CHECK1024-NEXT: add x8, sp, #1024
; CHECK1024-NEXT: mov w0, wzr
@@ -1311,7 +1311,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK0-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: sub sp, sp, #16
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xd0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 80 + 64 * VG
; CHECK0-NEXT: .cfi_offset w19, -8
; CHECK0-NEXT: .cfi_offset w20, -16
; CHECK0-NEXT: .cfi_offset w21, -24
@@ -1320,14 +1320,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK0-NEXT: .cfi_offset w24, -48
; CHECK0-NEXT: .cfi_offset w25, -56
; CHECK0-NEXT: .cfi_offset w29, -64
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64
; CHECK0-NEXT: mov x8, x0
; CHECK0-NEXT: mov w0, wzr
; CHECK0-NEXT: //APP
@@ -1368,7 +1368,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK64-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: sub sp, sp, #96
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 224 + 64 * VG
; CHECK64-NEXT: .cfi_offset w19, -8
; CHECK64-NEXT: .cfi_offset w20, -16
; CHECK64-NEXT: .cfi_offset w21, -24
@@ -1377,14 +1377,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK64-NEXT: .cfi_offset w24, -48
; CHECK64-NEXT: .cfi_offset w25, -56
; CHECK64-NEXT: .cfi_offset w29, -64
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128
; CHECK64-NEXT: mov x8, x0
; CHECK64-NEXT: mov w0, wzr
; CHECK64-NEXT: //APP
@@ -1431,7 +1431,7 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK1024-NEXT: str z9, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: sub sp, sp, #1056
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 2144 + 64 * VG
; CHECK1024-NEXT: .cfi_offset w19, -8
; CHECK1024-NEXT: .cfi_offset w20, -16
; CHECK1024-NEXT: .cfi_offset w21, -24
@@ -1440,14 +1440,14 @@ define i32 @svecc_csr_x18_25_d8_15_allocdi64(i64 %d, double %e, <vscale x 4 x i3
; CHECK1024-NEXT: .cfi_offset w24, -48
; CHECK1024-NEXT: .cfi_offset w25, -56
; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088
; CHECK1024-NEXT: mov x8, x0
; CHECK1024-NEXT: mov w0, wzr
; CHECK1024-NEXT: //APP
@@ -1869,7 +1869,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK0-NEXT: .cfi_offset w30, -40
; CHECK0-NEXT: .cfi_offset w29, -48
; CHECK0-NEXT: addvl sp, sp, #-18
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG
; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -1898,14 +1898,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48
; CHECK0-NEXT: mov x8, x0
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
@@ -1990,7 +1990,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: .cfi_offset w30, -40
; CHECK64-NEXT: .cfi_offset w29, -48
; CHECK64-NEXT: addvl sp, sp, #-18
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG
; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2019,16 +2019,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 112 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112
; CHECK64-NEXT: sub sp, sp, #64
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 176 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 176 + 144 * VG
; CHECK64-NEXT: mov x8, x0
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
@@ -2051,7 +2051,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: .cfi_restore vg
; CHECK64-NEXT: add sp, sp, #64
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG
; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -2119,7 +2119,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK1024-NEXT: .cfi_offset w30, -40
; CHECK1024-NEXT: .cfi_offset w29, -48
; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG
; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2148,16 +2148,16 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072
; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2096 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2096 + 144 * VG
; CHECK1024-NEXT: mov x8, x0
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
@@ -2180,7 +2180,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK1024-NEXT: movk w0, #59491, lsl #16
; CHECK1024-NEXT: .cfi_restore vg
; CHECK1024-NEXT: add sp, sp, #1024
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG
; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -2252,7 +2252,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK0-NEXT: .cfi_offset w30, -40
; CHECK0-NEXT: .cfi_offset w29, -48
; CHECK0-NEXT: addvl sp, sp, #-18
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG
; CHECK0-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK0-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK0-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2281,16 +2281,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48
; CHECK0-NEXT: sub sp, sp, #48
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 96 + 144 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 96 + 144 * VG
; CHECK0-NEXT: //APP
; CHECK0-NEXT: //NO_APP
; CHECK0-NEXT: bl __arm_sme_state
@@ -2312,7 +2312,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK0-NEXT: movk w0, #59491, lsl #16
; CHECK0-NEXT: .cfi_restore vg
; CHECK0-NEXT: add sp, sp, #48
-; CHECK0-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG
+; CHECK0-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG
; CHECK0-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK0-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK0-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -2376,7 +2376,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: .cfi_offset w30, -40
; CHECK64-NEXT: .cfi_offset w29, -48
; CHECK64-NEXT: addvl sp, sp, #-18
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG
; CHECK64-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK64-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2405,16 +2405,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 112 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 112 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 112 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 112 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 112 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 112 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 112 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x90, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 112 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 112
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x90, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 112
; CHECK64-NEXT: sub sp, sp, #112
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x01, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 224 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x01, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 224 + 144 * VG
; CHECK64-NEXT: //APP
; CHECK64-NEXT: //NO_APP
; CHECK64-NEXT: bl __arm_sme_state
@@ -2436,7 +2436,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: movk w0, #59491, lsl #16
; CHECK64-NEXT: .cfi_restore vg
; CHECK64-NEXT: add sp, sp, #112
-; CHECK64-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xf0, 0x00, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 112 + 144 * VG
+; CHECK64-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xf0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 112 + 144 * VG
; CHECK64-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK64-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -2504,7 +2504,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK1024-NEXT: .cfi_offset w30, -40
; CHECK1024-NEXT: .cfi_offset w29, -48
; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG
; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -2533,16 +2533,16 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1072 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1072 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1072 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1072 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1072 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1072 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1072 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xd0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1072 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1072
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xd0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1072
; CHECK1024-NEXT: sub sp, sp, #1072
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xe0, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 2144 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xe0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 2144 + 144 * VG
; CHECK1024-NEXT: //APP
; CHECK1024-NEXT: //NO_APP
; CHECK1024-NEXT: bl __arm_sme_state
@@ -2564,7 +2564,7 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK1024-NEXT: movk w0, #59491, lsl #16
; CHECK1024-NEXT: .cfi_restore vg
; CHECK1024-NEXT: add sp, sp, #1072
-; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0xb0, 0x08, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 1072 + 144 * VG
+; CHECK1024-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 1072 + 144 * VG
; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -3192,14 +3192,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64
; CHECK0-NEXT: mov w9, w0
; CHECK0-NEXT: mov x8, sp
; CHECK0-NEXT: mov w2, w1
@@ -3327,14 +3327,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128
; CHECK64-NEXT: sub sp, sp, #64
; CHECK64-NEXT: mov w9, w0
; CHECK64-NEXT: mov x8, sp
@@ -3469,14 +3469,14 @@ define i32 @svecc_call_dynamic_alloca(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x
; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088
; CHECK1024-NEXT: sub sp, sp, #1024
; CHECK1024-NEXT: mov w9, w0
; CHECK1024-NEXT: mov x8, sp
@@ -3616,14 +3616,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK0-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK0-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64
; CHECK0-NEXT: sub x9, sp, #1024
; CHECK0-NEXT: and sp, x9, #0xffffffffffffffe0
; CHECK0-NEXT: mov w2, w1
@@ -3743,14 +3743,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK64-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK64-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128
; CHECK64-NEXT: sub x9, sp, #1088
; CHECK64-NEXT: and sp, x9, #0xffffffffffffffe0
; CHECK64-NEXT: mov w2, w1
@@ -3875,14 +3875,14 @@ define i32 @svecc_call_realign(<4 x i16> %P0, i32 %P1, i32 %P2, <vscale x 16 x i
; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088
; CHECK1024-NEXT: sub x9, sp, #2048
; CHECK1024-NEXT: and sp, x9, #0xffffffffffffffe0
; CHECK1024-NEXT: mov w2, w1
@@ -4016,14 +4016,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 %
; CHECK0-NEXT: .cfi_offset w28, -48
; CHECK0-NEXT: .cfi_offset w30, -56
; CHECK0-NEXT: .cfi_offset w29, -64
-; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 64 - 8 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 64 - 16 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 64 - 24 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 64 - 32 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 64 - 40 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 64 - 48 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 64 - 56 * VG
-; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x40, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 64 - 64 * VG
+; CHECK0-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * VG - 64
+; CHECK0-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * VG - 64
; CHECK0-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK0-NEXT: ubfiz x8, x0, #2, #32
; CHECK0-NEXT: mov x9, sp
@@ -4125,14 +4125,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 %
; CHECK64-NEXT: .cfi_offset w28, -48
; CHECK64-NEXT: .cfi_offset w30, -56
; CHECK64-NEXT: .cfi_offset w29, -64
-; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 128 - 8 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 128 - 16 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 128 - 24 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 128 - 32 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 128 - 40 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 128 - 48 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 128 - 56 * VG
-; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0x80, 0x7f, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 128 - 64 * VG
+; CHECK64-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d8 @ cfa - 8 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d9 @ cfa - 16 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d10 @ cfa - 24 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d11 @ cfa - 32 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d12 @ cfa - 40 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d13 @ cfa - 48 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d14 @ cfa - 56 * VG - 128
+; CHECK64-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x80, 0x7f, 0x22 // $d15 @ cfa - 64 * VG - 128
; CHECK64-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK64-NEXT: ubfiz x8, x0, #2, #32
; CHECK64-NEXT: mov x9, sp
@@ -4240,14 +4240,14 @@ define i32 @svecc_call_dynamic_and_scalable_alloca(<4 x i16> %P0, i32 %P1, i32 %
; CHECK1024-NEXT: .cfi_offset w28, -48
; CHECK1024-NEXT: .cfi_offset w30, -56
; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 1088 - 8 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 1088 - 16 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 1088 - 24 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 1088 - 32 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 1088 - 40 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 1088 - 48 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 1088 - 56 * VG
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x11, 0xc0, 0x77, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 1088 - 64 * VG
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * VG - 1088
+; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * VG - 1088
; CHECK1024-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK1024-NEXT: ubfiz x8, x0, #2, #32
; CHECK1024-NEXT: mov x9, sp
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
index 56d865e..59b95be 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
@@ -18,7 +18,7 @@ define void @sve_1_vector(ptr %out) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -38,7 +38,7 @@ define void @sve_4_vector(ptr %out) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -63,7 +63,7 @@ define void @sve_16_vector(ptr %out) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-16
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: addvl sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa wsp, 16
@@ -103,7 +103,7 @@ define void @sve_17_vector(ptr %out) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl x9, sp, #-17
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG
; CHECK-NEXT: .LBB3_1: // %entry
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
@@ -155,9 +155,9 @@ define void @sve_1v_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
@@ -180,15 +180,15 @@ define void @sve_4v_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: str z11, [sp] // 16-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: ldr z11, [sp] // 16-byte Folded Reload
@@ -217,7 +217,7 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-16
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 128 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x01, 0x1e, 0x22 // sp + 16 + 128 * VG
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
@@ -235,14 +235,14 @@ define void @sve_16v_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
@@ -287,7 +287,7 @@ define void @sve_1p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
@@ -310,7 +310,7 @@ define void @sve_4p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: str p11, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p10, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p9, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -339,7 +339,7 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl x9, sp, #-17
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 136 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x01, 0x1e, 0x22 // $x9 + 16 + 136 * VG
; CHECK-NEXT: .LBB9_1: // %entry
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
@@ -370,14 +370,14 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: str z10, [sp, #14, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -426,7 +426,7 @@ define void @sve_1_vector_16_arr(ptr %out) #0 {
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: .cfi_def_cfa wsp, 32
; CHECK-NEXT: add sp, sp, #16
@@ -453,9 +453,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 {
; CHECK-NEXT: sub x9, sp, #3, lsl #12 // =12288
; CHECK-NEXT: .cfi_def_cfa w9, 12304
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 256 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 12304 + 256 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x79, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 12304 + 512 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x79, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 12304 + 512 * VG
; CHECK-NEXT: .LBB11_1: // %entry
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #1, lsl #12 // =4096
@@ -470,9 +470,9 @@ define void @sve_1_vector_4096_arr(ptr %out) #0 {
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: .cfi_def_cfa_register wsp
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0f, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x88, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 264 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x02, 0x1e, 0x22 // sp + 12304 + 264 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0e, 0x8f, 0x00, 0x11, 0x90, 0xe0, 0x00, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 12304 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 12304 + 16 * VG
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: .cfi_def_cfa wsp, 12304
; CHECK-NEXT: add sp, sp, #3, lsl #12 // =12288
@@ -538,38 +538,38 @@ define void @sve_1024_64k_guard(ptr %out) #0 "stack-probe-size"="65536" {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 256 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // sp + 16 + 256 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 512 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // sp + 16 + 512 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 768 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // sp + 16 + 768 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1024 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // sp + 16 + 1024 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1280 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // sp + 16 + 1280 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1536 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // sp + 16 + 1536 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1792 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // sp + 16 + 1792 * VG
; CHECK-NEXT: addvl sp, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 2048 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // sp + 16 + 2048 * VG
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1800 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x0e, 0x1e, 0x22 // sp + 16 + 1800 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1552 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0c, 0x1e, 0x22 // sp + 16 + 1552 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1304 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0a, 0x1e, 0x22 // sp + 16 + 1304 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1056 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x08, 0x1e, 0x22 // sp + 16 + 1056 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 808 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x06, 0x1e, 0x22 // sp + 16 + 808 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 560 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x04, 0x1e, 0x22 // sp + 16 + 560 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 312 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x02, 0x1e, 0x22 // sp + 16 + 312 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -588,23 +588,23 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl x9, sp, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 256 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x02, 0x1e, 0x22 // $x9 + 16 + 256 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 512 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x04, 0x1e, 0x22 // $x9 + 16 + 512 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 768 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x06, 0x1e, 0x22 // $x9 + 16 + 768 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1024 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x08, 0x1e, 0x22 // $x9 + 16 + 1024 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1280 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0a, 0x1e, 0x22 // $x9 + 16 + 1280 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1536 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0c, 0x1e, 0x22 // $x9 + 16 + 1536 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 1792 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x0e, 0x1e, 0x22 // $x9 + 16 + 1792 * VG
; CHECK-NEXT: addvl x9, x9, #-32
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x80, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2048 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x80, 0x10, 0x1e, 0x22 // $x9 + 16 + 2048 * VG
; CHECK-NEXT: addvl x9, x9, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x79, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $x9 + 16 + 2056 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x79, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x88, 0x10, 0x1e, 0x22 // $x9 + 16 + 2056 * VG
; CHECK-NEXT: .LBB14_1: // %entry
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: sub sp, sp, #16, lsl #12 // =65536
@@ -619,21 +619,21 @@ define void @sve_1028_64k_guard(ptr %out) #0 "stack-probe-size"="65536" {
; CHECK-NEXT: ldr xzr, [sp]
; CHECK-NEXT: .cfi_def_cfa_register wsp
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x0e, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1808 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x0e, 0x1e, 0x22 // sp + 16 + 1808 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x98, 0x0c, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1560 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x98, 0x0c, 0x1e, 0x22 // sp + 16 + 1560 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x0a, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1312 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x0a, 0x1e, 0x22 // sp + 16 + 1312 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 1064 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x08, 0x1e, 0x22 // sp + 16 + 1064 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb0, 0x06, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 816 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x06, 0x1e, 0x22 // sp + 16 + 816 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xb8, 0x04, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 568 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x04, 0x1e, 0x22 // sp + 16 + 568 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x02, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 320 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x02, 0x1e, 0x22 // sp + 16 + 320 * VG
; CHECK-NEXT: addvl sp, sp, #31
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc8, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc8, 0x00, 0x1e, 0x22 // sp + 16 + 72 * VG
; CHECK-NEXT: addvl sp, sp, #9
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -656,7 +656,7 @@ define void @sve_5_vector(ptr %out) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-5
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 40 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x28, 0x1e, 0x22 // sp + 16 + 40 * VG
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: addvl sp, sp, #5
; CHECK-NEXT: .cfi_def_cfa wsp, 16
@@ -682,21 +682,21 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: str p9, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: addvl sp, sp, #4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
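(The `.cfi_escape` byte strings checked above are raw DWARF call-frame instructions; the trailing `//` comment is their disassembled form. As a rough cross-check of the shorter encodings this patch switches the tests to, here is a standalone Python sketch — not part of the change — that decodes the `DW_CFA_def_cfa_expression` subset these CHECK lines use: `DW_OP_breg31`, `DW_OP_bregx`, `DW_OP_consts`, `DW_OP_mul`, and `DW_OP_plus`.

# Minimal decoder for the DWARF expression subset used in the CHECK lines
# above (a sketch for cross-checking the bytes, not part of the patch).

def uleb128(buf, i):
    # Unsigned LEB128: 7 value bits per byte, high bit set on all but the last.
    value, shift = 0, 0
    while True:
        byte = buf[i]
        i += 1
        value |= (byte & 0x7F) << shift
        shift += 7
        if not byte & 0x80:
            return value, i

def sleb128(buf, i):
    # Signed LEB128: as above, then sign-extend from the final byte's bit 6.
    value, shift = 0, 0
    while True:
        byte = buf[i]
        i += 1
        value |= (byte & 0x7F) << shift
        shift += 7
        if not byte & 0x80:
            if byte & 0x40:
                value -= 1 << shift
            return value, i

def decode_def_cfa_expression(esc):
    # esc is the full .cfi_escape byte list: DW_CFA_def_cfa_expression (0x0f),
    # a ULEB128 length, then a DWARF stack-machine expression.
    assert esc[0] == 0x0F, "not DW_CFA_def_cfa_expression"
    length, i = uleb128(esc, 1)
    assert i + length == len(esc), "length byte disagrees with payload"
    stack = []
    while i < len(esc):
        op = esc[i]
        i += 1
        if 0x70 <= op <= 0x8F:            # DW_OP_breg0..breg31 + SLEB128 offset
            off, i = sleb128(esc, i)
            reg = op - 0x70               # register 31 is sp on AArch64
            stack.append(f"sp + {off}" if reg == 31 else f"r{reg} + {off}")
        elif op == 0x92:                  # DW_OP_bregx: ULEB128 reg, SLEB128 off
            reg, i = uleb128(esc, i)
            off, i = sleb128(esc, i)
            name = "VG" if reg == 46 else f"r{reg}"  # 46 is the SVE VG register
            stack.append(name if off == 0 else f"{name} + {off}")
        elif op == 0x11:                  # DW_OP_consts: SLEB128 constant
            val, i = sleb128(esc, i)
            stack.append(str(val))
        elif op == 0x1E:                  # DW_OP_mul
            rhs, lhs = stack.pop(), stack.pop()
            stack.append(f"{rhs} * {lhs}")
        elif op == 0x22:                  # DW_OP_plus
            rhs, lhs = stack.pop(), stack.pop()
            stack.append(f"{lhs} + {rhs}")
        else:
            raise ValueError(f"unhandled DWARF opcode {op:#04x}")
    return stack.pop()

# The new 12-byte escape checked above decodes to "sp + 16 + 816 * VG".
print(decode_def_cfa_expression(
    [0x0F, 0x0A, 0x8F, 0x10, 0x92, 0x2E, 0x00, 0x11, 0xB0, 0x06, 0x1E, 0x22]))

The old form pushed `sp + 16` via `breg31 0; consts 16; plus` before multiplying; the new form folds the offset into the `breg31` operand, which is where the savings in the length byte come from.)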
diff --git a/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll
index 0960133..bd41101 100644
--- a/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll
+++ b/llvm/test/CodeGen/AArch64/stacksmash-arm64ec.ll
@@ -1,8 +1,10 @@
-; RUN: llc -mtriple=arm64ec-unknown-windows-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=arm64ec-unknown-windows < %s | FileCheck -check-prefixes=CHECK,NONGNU %s
+; RUN: llc -mtriple=arm64ec-unknown-windows-gnu < %s | FileCheck -check-prefixes=CHECK,GNU %s
; CHECK-LABEL: func = "#func"
; CHECK: bl "#other"
-; CHECK: bl "#__stack_chk_fail"
+; NONGNU: bl "#__security_check_cookie_arm64ec"
+; GNU: bl "#__stack_chk_fail"
define void @func() #0 {
entry:
%buf = alloca [10 x i8], align 1
diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 2520095..8b7fa9e 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -46,14 +46,14 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
; CHECK-NEXT: .cfi_offset w28, -16
; CHECK-NEXT: .cfi_offset w30, -24
; CHECK-NEXT: .cfi_offset w29, -32
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 32 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 32 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 32 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 32 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 32 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 32 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 32 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x60, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 32 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d8 @ cfa - 8 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d9 @ cfa - 16 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d10 @ cfa - 24 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d11 @ cfa - 32 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d12 @ cfa - 40 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d13 @ cfa - 48 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d14 @ cfa - 56 * VG - 32
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x60, 0x22 // $d15 @ cfa - 64 * VG - 32
; CHECK-NEXT: rdvl x9, #2
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: add x9, x9, #15
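(The `0x10` escapes above are `DW_CFA_expression` rules for individual registers — `0x48` is DWARF register 72, i.e. d8 on AArch64 — and, unlike the CFA form, evaluation starts with the CFA already on the expression stack. A hand-simulation of the new d8 rule, as a sketch with `CFA` and `VG` kept symbolic:

# Byte-by-byte walk of the new rule for d8:
#   0x10 0x48 0x0a  0x92 0x2e 0x00  0x11 0x78  0x1e  0x22  0x11 0x60  0x22
#   DW_CFA_expression, reg 72 (d8), length 10, then the expression below.
stack = ["CFA"]                  # DW_CFA_expression starts with the CFA pushed
stack.append("VG")               # 0x92 0x2e 0x00: DW_OP_bregx reg 46 (VG) + 0
stack.append("-8")               # 0x11 0x78:      DW_OP_consts -8
rhs, lhs = stack.pop(), stack.pop()
stack.append(f"{rhs} * {lhs}")   # 0x1e: DW_OP_mul   -> "-8 * VG"
rhs, lhs = stack.pop(), stack.pop()
stack.append(f"{lhs} + {rhs}")   # 0x22: DW_OP_plus  -> "CFA + -8 * VG"
stack.append("-32")              # 0x11 0x60:      DW_OP_consts -32
rhs, lhs = stack.pop(), stack.pop()
stack.append(f"{lhs} + {rhs}")   # 0x22: DW_OP_plus  -> "CFA + -8 * VG + -32"
print(stack[0])                  # i.e. the "$d8 @ cfa - 8 * VG - 32" comment

The byte count is unchanged here (length 0x0a either way); only the operand order moves, putting the scalable `VG` term before the fixed offset, matching the reordered `// $d8 @ cfa - 8 * VG - 32` comments.)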
diff --git a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll
index 30a8396..254b8e0 100644
--- a/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll
+++ b/llvm/test/CodeGen/AArch64/sve-callee-save-restore-pairs.ll
@@ -43,17 +43,17 @@ define void @fbyte(<vscale x 16 x i8> %v){
; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; NOPAIR-NEXT: .cfi_offset w30, -8
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; NOPAIR-NEXT: bl my_func
; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -113,17 +113,17 @@ define void @fbyte(<vscale x 16 x i8> %v){
; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; PAIR-NEXT: .cfi_offset w30, -8
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; PAIR-NEXT: bl my_func
; PAIR-NEXT: ptrue pn8.b
; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
@@ -187,17 +187,17 @@ define void @fhalf(<vscale x 8 x half> %v) {
; NOPAIR-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; NOPAIR-NEXT: .cfi_offset w30, -8
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; NOPAIR-NEXT: bl my_func
; NOPAIR-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; NOPAIR-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -257,17 +257,17 @@ define void @fhalf(<vscale x 8 x half> %v) {
; PAIR-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
; PAIR-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; PAIR-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; PAIR-NEXT: .cfi_offset w30, -8
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; PAIR-NEXT: bl my_func
; PAIR-NEXT: ptrue pn8.b
; PAIR-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
@@ -310,11 +310,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() {
; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; NOPAIR-NEXT: //APP
; NOPAIR-NEXT: //NO_APP
; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -336,11 +336,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs() {
; PAIR-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; PAIR-NEXT: st1b { z8.b, z9.b }, pn8, [sp, #2, mul vl] // 32-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; PAIR-NEXT: //APP
; PAIR-NEXT: //NO_APP
; PAIR-NEXT: ptrue pn8.b
@@ -368,11 +368,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() {
; NOPAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; NOPAIR-NEXT: //APP
; NOPAIR-NEXT: //NO_APP
; NOPAIR-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
@@ -393,11 +393,11 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_p_regs2() {
; PAIR-NEXT: str p10, [sp, #6, mul vl] // 2-byte Folded Spill
; PAIR-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; PAIR-NEXT: st1b { z8.b, z9.b }, pn9, [sp, #2, mul vl] // 32-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; PAIR-NEXT: //APP
; PAIR-NEXT: //NO_APP
; PAIR-NEXT: ptrue pn9.b
@@ -421,10 +421,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() {
; NOPAIR-NEXT: addvl sp, sp, #-2
; NOPAIR-NEXT: str z9, [sp] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
; NOPAIR-NEXT: //APP
; NOPAIR-NEXT: //NO_APP
; NOPAIR-NEXT: ldr z9, [sp] // 16-byte Folded Reload
@@ -440,10 +440,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_z_regs() {
; PAIR-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; PAIR-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
; PAIR-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
; PAIR-NEXT: //APP
; PAIR-NEXT: //NO_APP
; PAIR-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
@@ -494,10 +494,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() {
; NOPAIR-NEXT: addvl sp, sp, #-2
; NOPAIR-NEXT: str z10, [sp] // 16-byte Folded Spill
; NOPAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; NOPAIR-NEXT: .cfi_offset w29, -16
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; NOPAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16
; NOPAIR-NEXT: //APP
; NOPAIR-NEXT: //NO_APP
; NOPAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload
@@ -512,10 +512,10 @@ define aarch64_sve_vector_pcs void @test_clobbers_2_z_regs_negative() {
; PAIR-NEXT: addvl sp, sp, #-2
; PAIR-NEXT: str z10, [sp] // 16-byte Folded Spill
; PAIR-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; PAIR-NEXT: .cfi_offset w29, -16
-; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 16 * VG
+; PAIR-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; PAIR-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 16 * VG - 16
; PAIR-NEXT: //APP
; PAIR-NEXT: //NO_APP
; PAIR-NEXT: ldr z10, [sp] // 16-byte Folded Reload
@@ -536,7 +536,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() {
; NOPAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; NOPAIR-NEXT: addvl sp, sp, #-1
; NOPAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill
-; NOPAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; NOPAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; NOPAIR-NEXT: .cfi_offset w29, -16
; NOPAIR-NEXT: //APP
; NOPAIR-NEXT: //NO_APP
@@ -550,7 +550,7 @@ define aarch64_sve_vector_pcs void @test_clobbers_p_reg_negative() {
; PAIR-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; PAIR-NEXT: addvl sp, sp, #-1
; PAIR-NEXT: str p10, [sp, #7, mul vl] // 2-byte Folded Spill
-; PAIR-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; PAIR-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; PAIR-NEXT: .cfi_offset w29, -16
; PAIR-NEXT: //APP
; PAIR-NEXT: //NO_APP
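(In this file the d-register rules end in `0x40, 0x1c` instead of `0x11, 0x70, 0x22`: `0x40` is `DW_OP_lit16` (the literal opcodes are `0x30 + n` for n = 0..31) and `0x1c` is `DW_OP_minus`, so subtracting the 16-byte GPR frame chunk costs two bytes rather than the three needed by `DW_OP_consts -16; DW_OP_plus` — which is why the ULEB128 length byte drops from 0x0a to 0x09. A one-line sanity check, purely illustrative:

# DW_OP_lit0..DW_OP_lit31 occupy 0x30..0x4f, so 0x40 pushes the literal 16,
# and DW_OP_minus (0x1c) computes (cfa - 8*VG) - 16 from the two stack slots.
assert 0x40 - 0x30 == 16 and len([0x40, 0x1C]) < len([0x11, 0x70, 0x22])
)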
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 5e4c891..9066051 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -438,7 +438,7 @@ define void @non_sve_caller_non_sve_callee_high_range() {
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi d0, #0000000000000000
@@ -464,7 +464,7 @@ define void @non_sve_caller_high_range_non_sve_callee_high_range(float %f0, floa
; CHECK: // %bb.0:
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi d0, #0000000000000000
@@ -523,17 +523,17 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x
; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa8, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 168 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x01, 0x1e, 0x22 // sp + 16 + 168 * VG
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: mov z25.d, z0.d
; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: movi d0, #0000000000000000
@@ -621,17 +621,17 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range() {
; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: movi d0, #0000000000000000
; CHECK-NEXT: fmov s1, #1.00000000
; CHECK-NEXT: addvl x0, sp, #1
@@ -686,7 +686,7 @@ define void @verify_all_operands_are_initialised() {
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi d0, #0000000000000000
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
index d02aa06..6c6a691 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
@@ -8,7 +8,7 @@ define <4 x i32> @extract_v4i32_nxv16i32_12(<vscale x 16 x i32> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z3, [sp, #3, mul vl]
; CHECK-NEXT: str z2, [sp, #2, mul vl]
@@ -27,7 +27,7 @@ define <8 x i16> @extract_v8i16_nxv32i16_8(<vscale x 32 x i16> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z1, [sp, #1, mul vl]
; CHECK-NEXT: str z0, [sp]
@@ -44,7 +44,7 @@ define <4 x i16> @extract_v4i16_nxv32i16_8(<vscale x 32 x i16> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z3, [sp, #3, mul vl]
; CHECK-NEXT: str z2, [sp, #2, mul vl]
@@ -65,7 +65,7 @@ define <2 x i16> @extract_v2i16_nxv32i16_8(<vscale x 32 x i16> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: str z3, [sp, #3, mul vl]
@@ -94,7 +94,7 @@ define <2 x i64> @extract_v2i64_nxv8i64_8(<vscale x 8 x i64> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov w9, #8 // =0x8
@@ -120,7 +120,7 @@ define <4 x float> @extract_v4f32_nxv16f32_12(<vscale x 16 x float> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z3, [sp, #3, mul vl]
; CHECK-NEXT: str z2, [sp, #2, mul vl]
@@ -168,7 +168,7 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
@@ -224,7 +224,7 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xc0, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: str z1, [sp, #1, mul vl]
@@ -271,7 +271,7 @@ define <4 x i64> @extract_v4i64_nxv8i64_0(<vscale x 8 x i64> %arg) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: str z1, [sp, #1, mul vl]
; CHECK-NEXT: str z0, [sp]
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
index cbede1b..4aaa25e 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll
@@ -63,7 +63,7 @@ define <vscale x 14 x i1> @extract_nxv14i1_nxv28i1_14(<vscale x 28 x i1> %in) uw
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: punpkhi p2.h, p1.b
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: punpklo p1.h, p1.b
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
index 4b93900..8750867 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-reduce-fadda.ll
@@ -49,7 +49,7 @@ define half @fadda_nxv6f16(<vscale x 6 x half> %v, half %s) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov w8, #32768 // =0x8000
; CHECK-NEXT: ptrue p0.d
@@ -73,7 +73,7 @@ define half @fadda_nxv10f16(<vscale x 10 x half> %v, half %s) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-3
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: // kill: def $h2 killed $h2 def $z2
diff --git a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
index 1b6b92a..4374409 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptosi-sat.ll
@@ -254,7 +254,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, #-4476578029606273024 // =0xc1e0000000000000
; CHECK-NEXT: ptrue p0.d
@@ -341,7 +341,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov x8, #-4548635623644200960 // =0xc0e0000000000000
; CHECK-NEXT: ptrue p0.d
diff --git a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
index b3aefb8..1df2819 100644
--- a/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fptoui-sat.ll
@@ -208,7 +208,7 @@ define <vscale x 8 x i32> @test_signed_v8f64_v8i32(<vscale x 8 x double> %f) {
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #281474974613504 // =0xffffffe00000
@@ -275,7 +275,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #281337537757184 // =0xffe000000000
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index 7f558e3..8ca005a 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -588,7 +588,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index dcf3317..73c783d 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -186,7 +186,7 @@ define void @insert_v2i64_nxv16i64(<2 x i64> %sv0, <2 x i64> %sv1, ptr %out) uwt
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: str z0, [sp]
; CHECK-NEXT: str q1, [sp, #32]
@@ -229,7 +229,7 @@ define void @insert_v2i64_nxv16i64_lo2(ptr %psv, ptr %out) uwtable {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: str q0, [sp, #16]
; CHECK-NEXT: ldr z0, [sp, #1, mul vl]
@@ -896,7 +896,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_0(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -923,7 +923,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_1(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -950,7 +950,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_2(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -977,7 +977,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_3(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -1004,7 +1004,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_4(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -1031,7 +1031,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_5(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -1058,7 +1058,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_6(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -1085,7 +1085,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_7(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: punpkhi p0.h, p0.b
@@ -1112,7 +1112,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_8(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1139,7 +1139,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_9(<vscale x 16 x i1> %vec, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1166,7 +1166,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_10(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1193,7 +1193,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_11(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1220,7 +1220,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_12(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1247,7 +1247,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_13(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1274,7 +1274,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_14(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
@@ -1301,7 +1301,7 @@ define <vscale x 16 x i1> @insert_nxv1i1_nxv16i1_15(<vscale x 16 x i1> %vec, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpkhi p2.h, p0.b
; CHECK-NEXT: punpklo p0.h, p0.b
diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
index 6d09425..2a7e8a43c 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
@@ -41,13 +41,13 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_positive_offset
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
@@ -64,7 +64,7 @@ body: |
; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, 7, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -100,13 +100,13 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_negative_offset
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
@@ -123,7 +123,7 @@ body: |
; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
; CHECK-NEXT: renamable $z0 = LDNF1D_IMM renamable $p0, $sp, -8, implicit $ffr, implicit-def $ffr :: (load (s64) from %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -159,44 +159,44 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_positive_offset_out_of_range
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, 7, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -231,44 +231,44 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_negative_offset_out_of_range
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1B_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SB_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1H_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SH_S_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -2, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SH_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1W_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1W_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4
+ ; CHECK-NEXT: $x8 = ADDPL_XXI $sp, -4, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNF1SW_D_IMM renamable $p0, killed $x8, -8, implicit $ffr, implicit-def $ffr :: (load (s32) from %ir.object, align 8)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
index 1352b9d..863d4d1 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
@@ -41,13 +41,13 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_positive_offset
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, 7 :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, 7 :: (load (s16) from %ir.object)
; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, 7 :: (load (s32) from %ir.object, align 8)
@@ -56,7 +56,7 @@ body: |
; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s16) into %ir.object, align 8)
; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s32) into %ir.object, align 8)
; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, 7 :: (store (s64) into %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -84,13 +84,13 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_negative_offset
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, $sp, -8 :: (load (s8) from %ir.object, align 2)
; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, $sp, -8 :: (load (s16) from %ir.object)
; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, $sp, -8 :: (load (s32) from %ir.object)
@@ -99,7 +99,7 @@ body: |
; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s16) into %ir.object, align 8)
; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s32) into %ir.object, align 8)
; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, $sp, -8 :: (store (s64) into %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -127,30 +127,30 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_positive_offset_out_of_range
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, 7 :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, 7 :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, 7 :: (load (s32) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, 7 :: (load (s64) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s8) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s16) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s32) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 1, implicit $vg
; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, 7 :: (store (s64) into %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
@@ -178,30 +178,30 @@ body: |
liveins: $p0
; CHECK-LABEL: name: testcase_negative_offset_out_of_range
- ; CHECK: liveins: $p0
+ ; CHECK: liveins: $p0, $fp
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
- ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4
- ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -4, implicit $vg
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1B_ZRI renamable $p0, killed $x8, -8 :: (load (s8) from %ir.object, align 2)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1H_ZRI renamable $p0, killed $x8, -8 :: (load (s16) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1W_ZRI renamable $p0, killed $x8, -8 :: (load (s32) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: renamable $z0 = LDNT1D_ZRI renamable $p0, killed $x8, -8 :: (load (s64) from %ir.object)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: STNT1B_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s8) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: STNT1H_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s16) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: STNT1W_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s32) into %ir.object, align 8)
- ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1
+ ; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -1, implicit $vg
; CHECK-NEXT: STNT1D_ZRI renamable $z0, renamable $p0, killed $x8, -8 :: (store (s64) into %ir.object)
- ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4
+ ; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 4, implicit $vg
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
diff --git a/llvm/test/CodeGen/AArch64/sve-llrint.ll b/llvm/test/CodeGen/AArch64/sve-llrint.ll
index b0198cf..12d4918 100644
--- a/llvm/test/CodeGen/AArch64/sve-llrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-llrint.ll
@@ -88,7 +88,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: uunpklo z1.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
@@ -161,11 +161,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; CHECK-NEXT: uunpklo z2.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: mov w8, #64511 // =0xfbff
@@ -299,16 +299,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: uunpklo z4.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: mov w9, #64511 // =0xfbff
@@ -614,7 +614,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: uunpklo z2.d, z0.s
; CHECK-NEXT: uunpkhi z0.d, z0.s
@@ -684,11 +684,11 @@ define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; CHECK-NEXT: uunpklo z4.d, z0.s
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000
@@ -818,16 +818,16 @@ define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: uunpklo z24.d, z0.s
; CHECK-NEXT: uunpkhi z25.d, z0.s
; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000
@@ -1125,7 +1125,7 @@ define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000
@@ -1190,10 +1190,10 @@ define <vscale x 16 x i64> @llrint_v16f64(<vscale x 16 x double> %x) {
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT: mov z26.d, #0x8000000000000000
@@ -1312,16 +1312,16 @@ define <vscale x 32 x i64> @llrint_v32f64(<vscale x 32 x double> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ldr z2, [x0, #2, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/sve-lrint.ll b/llvm/test/CodeGen/AArch64/sve-lrint.ll
index aa586390..58ac53d 100644
--- a/llvm/test/CodeGen/AArch64/sve-lrint.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lrint.ll
@@ -89,7 +89,7 @@ define <vscale x 8 x iXLen> @lrint_v8f16(<vscale x 8 x half> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: uunpklo z1.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
@@ -162,11 +162,11 @@ define <vscale x 16 x iXLen> @lrint_v16f16(<vscale x 16 x half> %x) {
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; CHECK-NEXT: uunpklo z2.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: mov w8, #64511 // =0xfbff
@@ -300,16 +300,16 @@ define <vscale x 32 x iXLen> @lrint_v32f16(<vscale x 32 x half> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: uunpklo z4.s, z0.h
; CHECK-NEXT: uunpkhi z0.s, z0.h
; CHECK-NEXT: mov w9, #64511 // =0xfbff
@@ -615,7 +615,7 @@ define <vscale x 8 x iXLen> @lrint_v8f32(<vscale x 8 x float> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: uunpklo z2.d, z0.s
; CHECK-NEXT: uunpkhi z0.d, z0.s
@@ -685,11 +685,11 @@ define <vscale x 16 x iXLen> @lrint_v16f32(<vscale x 16 x float> %x) {
; CHECK-NEXT: str z10, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
; CHECK-NEXT: uunpklo z4.d, z0.s
; CHECK-NEXT: uunpkhi z0.d, z0.s
; CHECK-NEXT: mov w8, #-553648128 // =0xdf000000
@@ -819,16 +819,16 @@ define <vscale x 32 x iXLen> @lrint_v32f32(<vscale x 32 x float> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: uunpklo z24.d, z0.s
; CHECK-NEXT: uunpkhi z25.d, z0.s
; CHECK-NEXT: mov w9, #-553648128 // =0xdf000000
@@ -1126,7 +1126,7 @@ define <vscale x 8 x iXLen> @lrint_v8f64(<vscale x 8 x double> %x) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000
@@ -1191,10 +1191,10 @@ define <vscale x 16 x iXLen> @lrint_v16f64(<vscale x 16 x double> %x) {
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT: mov z26.d, #0x8000000000000000
@@ -1313,16 +1313,16 @@ define <vscale x 32 x iXLen> @lrint_v32f64(<vscale x 32 x double> %x) {
; CHECK-NEXT: str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ldr z2, [x0, #2, mul vl]
diff --git a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
index 6e08606..24df76b 100644
--- a/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-pred-arith.ll
@@ -53,7 +53,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1>
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
@@ -137,7 +137,7 @@ define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1>
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
index 9a4231a..0bc8cb8 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
@@ -20,7 +20,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov w9, w0
@@ -43,7 +43,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, w0
@@ -66,7 +66,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov w9, w0
@@ -89,7 +89,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov w9, w0
@@ -134,7 +134,7 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #128 // =0x80
@@ -157,7 +157,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #34464 // =0x86a0
@@ -183,7 +183,7 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cntw x8
; CHECK-NEXT: mov w9, #10 // =0xa
diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
index d7ed42d..4ed59bc 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -21,7 +21,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt,
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov x9, sp
@@ -45,7 +45,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov x9, sp
@@ -69,7 +69,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov x9, sp
@@ -130,7 +130,7 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt)
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov w9, #128 // =0x80
@@ -159,7 +159,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) {
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: mov w9, #16960 // =0x4240
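Note on the .cfi_escape rewrites in this and the surrounding files: the old and new byte strings describe the same location, the new form just folds the constant into the breg offset and uses DW_OP_lit*/DW_OP_minus where it can, so the payload shrinks by a few bytes (0x0c down to 0x08 here). Below is a minimal sketch, a hypothetical helper that is not part of the tree, decoding only the DWARF opcodes these directives actually use; feed it the bytes that follow the DW_CFA opcode, any register number, and the ULEB length.

  # cfi_decode.py: hypothetical helper for eyeballing .cfi_escape payloads.
  def _leb(buf, i, signed):
      # (S|U)LEB128 decode starting at index i; returns (value, next index).
      result, shift = 0, 0
      while True:
          byte = buf[i]
          i += 1
          result |= (byte & 0x7F) << shift
          shift += 7
          if not byte & 0x80:
              if signed and byte & 0x40:
                  result -= 1 << shift
              return result, i

  def decode(expr):
      # Render a DWARF expression payload as postfix text.
      out, i = [], 0
      while i < len(expr):
          op = expr[i]
          i += 1
          if 0x70 <= op <= 0x8F:          # DW_OP_breg0..breg31, SLEB offset
              off, i = _leb(expr, i, True)
              out.append(f"breg{op - 0x70}{off:+d}")  # breg31 is sp
          elif op == 0x92:                # DW_OP_bregx: ULEB reg, SLEB offset
              reg, i = _leb(expr, i, False)
              off, i = _leb(expr, i, True)
              out.append(f"breg{reg}{off:+d}")        # reg 46 is VG on AArch64
          elif op == 0x11:                # DW_OP_consts, SLEB value
              val, i = _leb(expr, i, True)
              out.append(str(val))
          elif 0x30 <= op <= 0x4F:        # DW_OP_lit0..DW_OP_lit31
              out.append(str(op - 0x30))
          elif op == 0x1C:
              out.append("minus")
          elif op == 0x1E:
              out.append("mul")
          elif op == 0x22:
              out.append("plus")
          else:
              raise ValueError(f"unhandled DWARF op {op:#x}")
      return " ".join(out)

  # New CFA payload above (after 0x0f and the length 0x08): sp + 16 + 16 * VG.
  print(decode(bytes([0x8F, 0x10, 0x92, 0x2E, 0x00, 0x40, 0x1E, 0x22])))
  # -> breg31+16 breg46+0 16 mul plus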
diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
index c5cf459..e0da9b57 100644
--- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
@@ -16,7 +16,7 @@ define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible"
; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -8
; CHECK-NEXT: .cfi_offset b8, -16
; CHECK-NEXT: mov z1.s, #0 // =0x0
@@ -219,7 +219,7 @@ define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, do
; CHECK-NEXT: str x29, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 32 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -8
; CHECK-NEXT: .cfi_offset b8, -16
; CHECK-NEXT: mov z1.s, #0 // =0x0
@@ -266,7 +266,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aa
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
@@ -310,7 +310,7 @@ define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
@@ -383,7 +383,7 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK-NEXT: .cfi_offset w30, -40
; CHECK-NEXT: .cfi_offset w29, -48
; CHECK-NEXT: addvl sp, sp, #-18
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x30, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 48 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x30, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 48 + 144 * VG
; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -412,14 +412,14 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 48 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 48 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 48 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 48 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 48 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 48 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 48 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x50, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 48 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d8 @ cfa - 8 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d9 @ cfa - 16 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d10 @ cfa - 24 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d11 @ cfa - 32 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d12 @ cfa - 40 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d13 @ cfa - 48 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d14 @ cfa - 56 * VG - 48
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x50, 0x22 // $d15 @ cfa - 64 * VG - 48
; CHECK-NEXT: mov x8, x0
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
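The register rules above use the same encoding with one extra field: DW_CFA_expression (0x10) is followed by a ULEB register number (0x48 is DWARF register 72, which LLVM's comments render as $d8) and the ULEB payload length, and the expression is evaluated with the CFA pushed as the initial stack entry. Applying the decode() sketch from the earlier note to the 9-byte $d8 rule from the _fp functions above:

  # Payload of the new $d8 rule (the bytes after 0x10, 0x48, 0x09):
  print(decode(bytes([0x92, 0x2E, 0x00, 0x11, 0x78, 0x1E, 0x22, 0x40, 0x1C])))
  # -> breg46+0 -8 mul plus 16 minus, i.e. cfa - 8 * VG - 16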
diff --git a/llvm/test/CodeGen/AArch64/sve-trunc.ll b/llvm/test/CodeGen/AArch64/sve-trunc.ll
index 0ec6538..50580cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-trunc.ll
@@ -115,7 +115,7 @@ define <vscale x 16 x i1> @trunc_i64toi1_split3(<vscale x 16 x i64> %in) {
; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: and z7.d, z7.d, #0x1
; CHECK-NEXT: and z6.d, z6.d, #0x1
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
index 8a504cd..198e0a3 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-compress.ll
@@ -105,7 +105,7 @@ define <vscale x 8 x i32> @test_compress_large(<vscale x 8 x i32> %vec, <vscale
; CHECK: // %bb.0:
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: punpklo p2.h, p0.b
; CHECK-NEXT: cnth x9
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
index 0eacac2..1dbd7dd 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
@@ -276,7 +276,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0]
@@ -298,7 +298,7 @@ define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") %
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ld1b { z2.b, z3.b }, pn8/z, [x0, x1]
@@ -585,7 +585,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, pt
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0]
@@ -607,7 +607,7 @@ define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount")
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1]
@@ -896,7 +896,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn,
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0]
@@ -918,7 +918,7 @@ define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount"
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2]
@@ -1205,7 +1205,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn,
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0]
@@ -1227,7 +1227,7 @@ define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount"
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov p8.b, p0.b
; CHECK-NEXT: ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3]
diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index 822be14..7e1f63d 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -13,7 +13,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: addvl sp, sp, #-18
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -42,27 +42,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG
; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: .Ltmp0: // EH_LABEL
; CHECK-NEXT: bl may_throw_sve
-; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: .Ltmp1: // EH_LABEL
; CHECK-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_1: // %.Lcontinue
; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -108,10 +108,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB0_2: // %.Lunwind
; CHECK-NEXT: .cfi_restore_state
-; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: .Ltmp2: // EH_LABEL
; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -165,7 +165,7 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: .cfi_offset w30, -8
; GISEL-NEXT: .cfi_offset w29, -16
; GISEL-NEXT: addvl sp, sp, #-18
-; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; GISEL-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; GISEL-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; GISEL-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
@@ -194,27 +194,27 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; GISEL-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; GISEL-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
-; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; GISEL-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x49, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x40, 0x1c // $d9 @ cfa - 16 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4a, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x40, 0x1c // $d10 @ cfa - 24 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4b, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x40, 0x1c // $d11 @ cfa - 32 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4c, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x40, 0x1c // $d12 @ cfa - 40 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4d, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x40, 0x1c // $d13 @ cfa - 48 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4e, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x40, 0x1c // $d14 @ cfa - 56 * VG - 16
+; GISEL-NEXT: .cfi_escape 0x10, 0x4f, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x40, 0x1c // $d15 @ cfa - 64 * VG - 16
; GISEL-NEXT: addvl sp, sp, #-2
-; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 16 + 160 * VG
; GISEL-NEXT: .cfi_remember_state
; GISEL-NEXT: str z0, [sp] // 16-byte Folded Spill
-; GISEL-NEXT: .Ltmp0:
+; GISEL-NEXT: .Ltmp0: // EH_LABEL
; GISEL-NEXT: bl may_throw_sve
-; GISEL-NEXT: .Ltmp1:
+; GISEL-NEXT: .Ltmp1: // EH_LABEL
; GISEL-NEXT: str z0, [sp, #1, mul vl] // 16-byte Folded Spill
; GISEL-NEXT: b .LBB0_1
; GISEL-NEXT: .LBB0_1: // %.Lcontinue
; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #2
-; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -260,10 +260,10 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: ret
; GISEL-NEXT: .LBB0_2: // %.Lunwind
; GISEL-NEXT: .cfi_restore_state
-; GISEL-NEXT: .Ltmp2:
+; GISEL-NEXT: .Ltmp2: // EH_LABEL
; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #2
-; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
+; GISEL-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x90, 0x01, 0x1e, 0x22 // sp + 16 + 144 * VG
; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -355,9 +355,9 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; CHECK-NEXT: .cfi_offset b23, -272
; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: .Ltmp3: // EH_LABEL
; CHECK-NEXT: bl may_throw_neon
-; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: .Ltmp4: // EH_LABEL
; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: b .LBB1_1
; CHECK-NEXT: .LBB1_1: // %.Lcontinue
@@ -394,7 +394,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_2: // %.Lunwind
; CHECK-NEXT: .cfi_restore_state
-; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: .Ltmp5: // EH_LABEL
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload
; CHECK-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload
@@ -462,10 +462,10 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; GISEL-NEXT: .cfi_offset b23, -272
; GISEL-NEXT: .cfi_remember_state
; GISEL-NEXT: str q0, [sp] // 16-byte Folded Spill
-; GISEL-NEXT: .Ltmp3:
+; GISEL-NEXT: .Ltmp3: // EH_LABEL
; GISEL-NEXT: bl may_throw_neon
; GISEL-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; GISEL-NEXT: .Ltmp4:
+; GISEL-NEXT: .Ltmp4: // EH_LABEL
; GISEL-NEXT: b .LBB1_1
; GISEL-NEXT: .LBB1_1: // %.Lcontinue
; GISEL-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -501,7 +501,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; GISEL-NEXT: ret
; GISEL-NEXT: .LBB1_2: // %.Lunwind
; GISEL-NEXT: .cfi_restore_state
-; GISEL-NEXT: .Ltmp5:
+; GISEL-NEXT: .Ltmp5: // EH_LABEL
; GISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; GISEL-NEXT: ldp x29, x30, [sp, #288] // 16-byte Folded Reload
; GISEL-NEXT: ldp q9, q8, [sp, #256] // 32-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/xray-custom-log.ll b/llvm/test/CodeGen/AArch64/xray-custom-log.ll
index fd8ddf9..2432808 100644
--- a/llvm/test/CodeGen/AArch64/xray-custom-log.ll
+++ b/llvm/test/CodeGen/AArch64/xray-custom-log.ll
@@ -1,7 +1,5 @@
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
; RUN: llc -mtriple=arm64-apple-darwin < %s | FileCheck %s --check-prefix=MACHO
-; RUN: llc -filetype=obj -mtriple=aarch64 %s -o %t
-; RUN: llvm-dwarfdump -debug-info %t | FileCheck %s --check-prefix=DBG
; MACHO: bl ___xray_CustomEvent
; MACHO: bl ___xray_CustomEvent
@@ -92,18 +90,6 @@ entry:
; CHECK-NEXT: .byte 0x02
; CHECK-NEXT: .zero 13
-;; Construct call site entries for PATCHABLE_EVENT_CALL.
-; DBG: DW_TAG_subprogram
-; DBG: DW_AT_name
-; DBG-SAME: ("customevent")
-; DBG: DW_TAG_call_site
-; DBG-NEXT: DW_AT_call_target (DW_OP_reg0 {{.*}})
-; DBG-NEXT: DW_AT_call_return_pc
-; DBG-EMPTY:
-; DBG: DW_TAG_call_site
-; DBG-NEXT: DW_AT_call_target (DW_OP_reg2 {{.*}})
-; DBG-NEXT: DW_AT_call_return_pc
-
declare void @llvm.xray.customevent(ptr, i64)
declare void @llvm.xray.typedevent(i64, ptr, i64)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index a066b15..e6a8bac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -1917,8 +1917,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-NEXT: s_mov_b32 s0, 0
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3e80
; GFX9-NEXT: v_mov_b32_e32 v0, 15
-; GFX9-NEXT: s_movk_i32 s0, 0x3e84
+; GFX9-NEXT: s_add_i32 s0, s0, 4
; GFX9-NEXT: scratch_store_dword off, v0, s0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -1933,7 +1934,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; GFX10-NEXT: v_mov_b32_e32 v0, 13
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_movk_i32 s0, 0x3e84
+; GFX10-NEXT: s_movk_i32 s0, 0x3e80
+; GFX10-NEXT: s_add_i32 s0, s0, 4
; GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -1945,10 +1947,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX942-LABEL: store_load_large_imm_offset_kernel:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: v_mov_b32_e32 v0, 13
+; GFX942-NEXT: s_movk_i32 s0, 0x3e80
; GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v0, 15
-; GFX942-NEXT: s_movk_i32 s0, 0x3e84
+; GFX942-NEXT: s_add_i32 s0, s0, 4
; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -1958,7 +1961,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX11-LABEL: store_load_large_imm_offset_kernel:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; GFX11-NEXT: s_movk_i32 s0, 0x3e84
+; GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_add_i32 s0, s0, 4
; GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -1986,8 +1991,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX9-NEXT: s_mov_b32 s0, 0
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
+; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2002,7 +2008,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
-; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2014,10 +2021,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_kernel:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2027,7 +2035,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX11-LABEL: store_load_large_imm_offset_kernel:
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -2061,11 +2071,13 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-LABEL: store_load_large_imm_offset_foo:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3e80
; GFX9-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-NEXT: s_add_i32 s1, s32, s0
; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 15
-; GFX9-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX9-NEXT: s_add_i32 s0, s1, 4
; GFX9-NEXT: scratch_store_dword off, v0, s0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2076,8 +2088,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-NEXT: s_movk_i32 s0, 0x3e80
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX10-NEXT: s_add_i32 s1, s32, s0
+; GFX10-NEXT: s_add_i32 s0, s1, 4
; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2089,11 +2103,13 @@ define void @store_load_large_imm_offset_foo() {
; GFX942-LABEL: store_load_large_imm_offset_foo:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_movk_i32 s0, 0x3e80
; GFX942-NEXT: v_mov_b32_e32 v0, 13
+; GFX942-NEXT: s_add_i32 s1, s32, s0
; GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v0, 15
-; GFX942-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX942-NEXT: s_add_i32 s0, s1, 4
; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2104,7 +2120,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; GFX11-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_add_i32 s1, s32, s0
+; GFX11-NEXT: s_add_i32 s0, s1, 4
; GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -2133,11 +2152,13 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX9-LABEL: store_load_large_imm_offset_foo:
; UNALIGNED_GFX9: ; %bb.0: ; %bb
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX9-NEXT: s_add_i32 s1, s32, s0
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2148,8 +2169,10 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX10: ; %bb.0: ; %bb
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
-; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX10-NEXT: s_add_i32 s1, s32, s0
+; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2161,11 +2184,13 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_foo:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX942-NEXT: s_add_i32 s1, s32, s0
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2176,7 +2201,10 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; UNALIGNED_GFX11-NEXT: s_add_i32 s1, s32, s0
+; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
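In the hunks above, the fused scratch offset is split: the large component (0x3e80, i.e. 16000) is materialized on its own and the remaining 4 is added afterwards, reaching the same 0x3e84 (16004) as the old fused immediate. A one-line sanity check:

  assert 0x3e80 + 4 == 0x3e84 == 16004  # split offset reaches the same address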
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 2785b78..481a254 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -2243,36 +2243,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB51_3
+; GFX1250-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB51_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB51_2
-; GFX1250-NEXT: .LBB51_3:
+; GFX1250-NEXT: .LBB51_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2322,36 +2308,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB52_3
+; GFX1250-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB52_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB52_2
-; GFX1250-NEXT: .LBB52_3:
+; GFX1250-NEXT: .LBB52_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2401,36 +2373,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB53_3
+; GFX1250-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB53_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB53_2
-; GFX1250-NEXT: .LBB53_3:
+; GFX1250-NEXT: .LBB53_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2459,23 +2417,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v2, v0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
index 6a4522f..d69a3e1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-addrspacecast.mir
@@ -141,11 +141,11 @@ body: |
; SIVI-NEXT: {{ $}}
; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p5) = COPY $vgpr0
+ ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5)
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68
; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4)
- ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p5)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; SIVI-NEXT: [[C1:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1
; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
@@ -157,9 +157,9 @@ body: |
; GFX9: liveins: $vgpr0
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
+ ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64)
- ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(p5) = G_CONSTANT i32 -1
; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
@@ -210,11 +210,11 @@ body: |
; SIVI-NEXT: {{ $}}
; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr0
+ ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3)
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
- ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY1]](p3)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1
; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
@@ -226,9 +226,9 @@ body: |
; GFX9: liveins: $vgpr0
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+ ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3)
; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64)
- ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p3)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1
; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
@@ -354,20 +354,20 @@ body: |
; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1
; SIVI-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY1]](<2 x p3>)
+ ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; SIVI-NEXT: [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY2]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
- ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; SIVI-NEXT: [[C1:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1
; SIVI-NEXT: [[C2:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; SIVI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C1]]
; SIVI-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C2]]
+ ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
; SIVI-NEXT: [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY3]], [[C]](s64)
; SIVI-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p4) :: (dereferenceable invariant load (s32), align 64, addrspace 4)
- ; SIVI-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
; SIVI-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[LOAD1]](s32)
; SIVI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C1]]
; SIVI-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C2]]
@@ -379,17 +379,17 @@ body: |
; GFX9-NEXT: {{ $}}
; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x p3>) = COPY $vgpr0_vgpr1
; GFX9-NEXT: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[COPY]](<2 x p3>)
+ ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base
; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64)
- ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV]](p3)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV3]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(p3) = G_CONSTANT i32 -1
; GFX9-NEXT: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](p3), [[C]]
; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(p0) = G_SELECT [[ICMP]](s1), [[MV]], [[C1]]
+ ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
; GFX9-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_shared_base
; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_1]](s64)
- ; GFX9-NEXT: [[PTRTOINT1:%[0-9]+]]:_(s32) = G_PTRTOINT [[UV1]](p3)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT1]](s32), [[UV5]](s32)
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](p3), [[C]]
; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(p0) = G_SELECT [[ICMP1]](s1), [[MV1]], [[C1]]
@@ -506,19 +506,19 @@ body: |
; SIVI-NEXT: {{ $}}
; SIVI-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
; SIVI-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
+ ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5)
; SIVI-NEXT: [[COPY1:%[0-9]+]]:_(p4) = COPY [[COPY]](p4)
; SIVI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 68
; SIVI-NEXT: [[PTR_ADD:%[0-9]+]]:_(p4) = nuw inbounds G_PTR_ADD [[COPY1]], [[C]](s64)
; SIVI-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (dereferenceable invariant load (s32), addrspace 4)
- ; SIVI-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5)
; SIVI-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[LOAD]](s32)
; SIVI-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0)
;
; GFX9-LABEL: name: test_addrspacecast_p5_fi_to_p0
; GFX9: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
+ ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5)
; GFX9-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64(s64) = S_MOV_B64 $src_private_base
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[S_MOV_B64_]](s64)
- ; GFX9-NEXT: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[FRAME_INDEX]](p5)
; GFX9-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[PTRTOINT]](s32), [[UV1]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](p0)
%0:_(p5) = G_FRAME_INDEX %stack.0
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
new file mode 100644
index 0000000..4b6375c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-gas.ll
@@ -0,0 +1,134 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+; Test code sequences for addrspacecast with globally addressable scratch.
+
+target triple = "amdgcn-amd-amdhsa"
+
+define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) {
+; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_lshlrev_b32 v1, 20, v0
+; GFX1250-SDAG-NEXT: s_cmp_lg_u32 s2, -1
+; GFX1250-SDAG-NEXT: s_cselect_b32 vcc_lo, -1, 0
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
+; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: s_cmp_lg_u32 s2, -1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v2, 20, v2
+; GFX1250-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX1250-GISEL-NEXT: s_and_b32 s0, 1, s0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo
+; GFX1250-GISEL-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, 0, v1
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: s_endpgm
+ %stof = addrspacecast ptr addrspace(5) %ptr to ptr
+ store volatile i32 0, ptr %stof
+ ret void
+}
+
+define amdgpu_kernel void @use_private_to_flat_addrspacecast_nonnull(ptr addrspace(5) %ptr) {
+; GFX1250-SDAG-LABEL: use_private_to_flat_addrspacecast_nonnull:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_mbcnt_lo_u32_b32 v0, -1, 0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 20, v0
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: use_private_to_flat_addrspacecast_nonnull:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b32 s2, s[4:5], 0x24
+; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_flat_scratch_base_lo
+; GFX1250-GISEL-NEXT: v_mbcnt_lo_u32_b32 v2, -1, 0
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v2, 20, v2
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, s2, v0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v2, v1, vcc_lo
+; GFX1250-GISEL-NEXT: flat_store_b32 v[0:1], v3 scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: s_endpgm
+ %stof = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %ptr)
+ store volatile i32 0, ptr %stof
+ ret void
+}
+
+define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) {
+; GFX1250-LABEL: use_flat_to_private_addrspacecast:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_sub_co_i32 s2, s0, s2
+; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1250-NEXT: s_cselect_b32 s0, s2, -1
+; GFX1250-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_storecnt 0x0
+; GFX1250-NEXT: s_endpgm
+ %ftos = addrspacecast ptr %ptr to ptr addrspace(5)
+ store volatile i32 0, ptr addrspace(5) %ftos
+ ret void
+}
+
+define amdgpu_kernel void @use_flat_to_private_addrspacecast_nonnull(ptr %ptr) {
+; GFX1250-SDAG-LABEL: use_flat_to_private_addrspacecast_nonnull:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-SDAG-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
+; GFX1250-SDAG-NEXT: s_wait_storecnt 0x0
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: use_flat_to_private_addrspacecast_nonnull:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX1250-GISEL-NEXT: scratch_store_b32 off, v0, s0 scope:SCOPE_SYS
+; GFX1250-GISEL-NEXT: s_wait_storecnt 0x0
+; GFX1250-GISEL-NEXT: s_endpgm
+ %ftos = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr %ptr)
+ store volatile i32 0, ptr addrspace(5) %ftos
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
new file mode 100644
index 0000000..5fc9f4a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
@@ -0,0 +1,1486 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GFX1250 %s
+
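+; Check that both system-scope and syncscope("one-as") atomicrmw operations on
+; gfx1250 select hardware atomic instructions with scope:SCOPE_SYS. Sub-dword
+; min/max expand to cmpxchg loops, and flat 64-bit operations get an
+; address-space dispatch for possibly-private pointers.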
+define float @global_system_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @global_system_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @global_system_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define i32 @global_one_as_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i64 @global_one_as_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
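+; Sub-dword atomics have no hardware instruction, so i16 min/max/umin/umax are
+; expanded to a masked 32-bit global_atomic_cmpswap_b32 loop on the containing
+; dword.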
+define i16 @global_one_as_atomic_min_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB28_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_umin_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB29_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_max_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB30_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_umax_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB31_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
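+; Flat f32 atomics map directly to flat_atomic_*_f32 with scope:SCOPE_SYS and
+; need no address-space dispatch.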
+define float @flat_system_atomic_fadd_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fadd_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
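+; Flat f64 fadd has no flat hardware instruction, so it is split at run time
+; into shared (ds_add_rtn_f64), global (global_atomic_add_f64), and private
+; (scratch load/add/store) paths.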
+define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB34_6
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_3
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB34_3: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB34_5: ; %Flow1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB34_6: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB34_8
+; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
+; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB35_6
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_3
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB35_3: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB35_5: ; %Flow1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB35_6: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB35_8
+; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
+; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @flat_system_atomic_fmin_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fmin_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
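+; Flat f64 fmin/fmax do have flat instructions, but a possibly-private pointer
+; still requires a branch: private pointers take a scratch load/op/store path,
+; everything else uses flat_atomic_*_num_f64.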
+define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB38_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB38_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB38_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB39_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB39_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB39_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @flat_system_atomic_fmax_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fmax_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB42_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB42_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB42_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB43_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB43_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define i32 @flat_one_as_atomic_min_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_min_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_max_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_max_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_umin_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_umin_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_umax_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_umax_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
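+; Flat 64-bit integer min/max/umin/umax follow the same pattern: the private
+; case is expanded to scratch load/op/store, and the rest uses the flat atomic
+; instruction with scope:SCOPE_SYS.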
+define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB52_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB52_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB52_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB52_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB53_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB53_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB53_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB53_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB54_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB54_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB54_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB54_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB55_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB55_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB55_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB55_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB56_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB56_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB56_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB56_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB57_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB57_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB57_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB57_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB58_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB58_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB58_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB58_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB59_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB59_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB59_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB59_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i16 @flat_one_as_atomic_min_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB60_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_umin_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB61_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_max_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB62_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_umax_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB63_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/empty-text.ll b/llvm/test/CodeGen/AMDGPU/empty-text.ll
new file mode 100644
index 0000000..8aa8600
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/empty-text.ll
@@ -0,0 +1,9 @@
+; Test that there is no s_code_end padding if .text is otherwise empty.
+
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1200 < %s | FileCheck %s --check-prefixes=GCN
+
+@globalVar = global i32 37
+
+declare amdgpu_ps void @funcDecl()
+
+; GCN-NOT: .fill
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 2ff66c9..7d36c9f 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -252,13 +252,15 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_xchg_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB10_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -277,9 +279,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-SDAG-NEXT: .LBB10_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -292,15 +296,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB10_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -314,13 +319,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB10_2
; GFX1250-GISEL-NEXT: .LBB10_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -344,11 +352,13 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB11_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -367,8 +377,11 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-SDAG-NEXT: .LBB11_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -381,18 +394,19 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB11_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -406,13 +420,16 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB11_2
; GFX1250-GISEL-NEXT: .LBB11_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -433,11 +450,13 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB12_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -455,9 +474,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-SDAG-NEXT: .LBB12_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
@@ -465,13 +486,14 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB12_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -483,14 +505,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB12_2
; GFX1250-GISEL-NEXT: .LBB12_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
@@ -508,10 +533,12 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB13_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -529,8 +556,11 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-SDAG-NEXT: .LBB13_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_store_b64 v0, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
@@ -538,16 +568,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB13_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -559,14 +590,17 @@ define amdgpu_ps void @flat_xchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: flat_atomic_swap_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB13_2
; GFX1250-GISEL-NEXT: .LBB13_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v0, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
%zext.offset = zext i32 %voffset to i64
@@ -642,13 +676,15 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_add_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB18_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -667,9 +703,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-SDAG-NEXT: .LBB18_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3]
@@ -683,15 +721,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB18_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -705,13 +744,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB18_2
; GFX1250-GISEL-NEXT: .LBB18_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5]
@@ -736,11 +778,13 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB19_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -759,8 +803,11 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB19_2
; GFX1250-SDAG-NEXT: .LBB19_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[2:3]
@@ -774,18 +821,19 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB19_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -799,13 +847,16 @@ define amdgpu_ps <2 x float> @flat_add_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB19_2
; GFX1250-GISEL-NEXT: .LBB19_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], v[0:1], v[4:5]
@@ -827,11 +878,13 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB20_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -849,9 +902,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB20_2
; GFX1250-SDAG-NEXT: .LBB20_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
@@ -862,13 +917,14 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB20_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -880,14 +936,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB20_2
; GFX1250-GISEL-NEXT: .LBB20_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5]
@@ -908,10 +967,12 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB21_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -929,8 +990,11 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB21_2
; GFX1250-SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[2:3]
@@ -941,16 +1005,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB21_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -962,14 +1027,17 @@ define amdgpu_ps void @flat_add_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: flat_atomic_add_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB21_2
; GFX1250-GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[0:1], v[0:1], v[4:5]
@@ -1048,13 +1116,15 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_sub_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB26_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1073,9 +1143,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB26_2
; GFX1250-SDAG-NEXT: .LBB26_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3]
@@ -1089,15 +1161,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB26_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1111,13 +1184,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB26_2
; GFX1250-GISEL-NEXT: .LBB26_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5]
@@ -1142,11 +1218,13 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB27_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1165,8 +1243,11 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB27_2
; GFX1250-SDAG-NEXT: .LBB27_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[2:3]
@@ -1180,18 +1261,19 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB27_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1205,13 +1287,16 @@ define amdgpu_ps <2 x float> @flat_sub_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB27_2
; GFX1250-GISEL-NEXT: .LBB27_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[2:3], v[0:1], v[4:5]
@@ -1233,11 +1318,13 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB28_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1255,9 +1342,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB28_2
; GFX1250-SDAG-NEXT: .LBB28_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3]
@@ -1268,13 +1357,14 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB28_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1286,14 +1376,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB28_2
; GFX1250-GISEL-NEXT: .LBB28_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5]
@@ -1314,10 +1407,12 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB29_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1335,8 +1430,11 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB29_2
; GFX1250-SDAG-NEXT: .LBB29_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[2:3]
@@ -1347,16 +1445,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB29_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1368,14 +1467,17 @@ define amdgpu_ps void @flat_sub_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: flat_atomic_sub_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB29_2
; GFX1250-GISEL-NEXT: .LBB29_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_sub_nc_u64_e32 v[0:1], v[0:1], v[4:5]
@@ -1454,13 +1556,15 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_and_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB34_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1479,9 +1583,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB34_2
; GFX1250-SDAG-NEXT: .LBB34_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
@@ -1496,15 +1602,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB34_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1518,13 +1625,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB34_2
; GFX1250-GISEL-NEXT: .LBB34_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
@@ -1550,11 +1660,13 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB35_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1573,8 +1685,11 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB35_2
; GFX1250-SDAG-NEXT: .LBB35_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v3, v1, v3
@@ -1589,18 +1704,19 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB35_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1614,13 +1730,16 @@ define amdgpu_ps <2 x float> @flat_and_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB35_2
; GFX1250-GISEL-NEXT: .LBB35_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, v0, v4
@@ -1643,11 +1762,13 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB36_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1665,9 +1786,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB36_2
; GFX1250-SDAG-NEXT: .LBB36_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
@@ -1679,13 +1802,14 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB36_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1697,14 +1821,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB36_2
; GFX1250-GISEL-NEXT: .LBB36_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
@@ -1726,10 +1853,12 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB37_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1747,8 +1876,11 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB37_2
; GFX1250-SDAG-NEXT: .LBB37_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_and_b32_e32 v1, v1, v3
@@ -1760,16 +1892,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB37_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1781,14 +1914,17 @@ define amdgpu_ps void @flat_and_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: flat_atomic_and_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB37_2
; GFX1250-GISEL-NEXT: .LBB37_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, v0, v4
@@ -1868,13 +2004,15 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-LABEL: flat_or_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB42_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1893,9 +2031,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB42_2
; GFX1250-SDAG-NEXT: .LBB42_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
@@ -1910,15 +2050,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB42_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -1932,13 +2073,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB42_2
; GFX1250-GISEL-NEXT: .LBB42_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
@@ -1964,11 +2108,13 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB43_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -1987,8 +2133,11 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB43_2
; GFX1250-SDAG-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v3, v1, v3
@@ -2003,18 +2152,19 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB43_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2028,13 +2178,16 @@ define amdgpu_ps <2 x float> @flat_or_saddr_i64_rtn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB43_2
; GFX1250-GISEL-NEXT: .LBB43_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v2, v0, v4
@@ -2057,11 +2210,13 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB44_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2079,9 +2234,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB44_2
; GFX1250-SDAG-NEXT: .LBB44_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
@@ -2093,13 +2250,14 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB44_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2111,14 +2269,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset, i
; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB44_2
; GFX1250-GISEL-NEXT: .LBB44_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
@@ -2140,10 +2301,12 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB45_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2161,8 +2324,11 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB45_2
; GFX1250-SDAG-NEXT: .LBB45_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_or_b32_e32 v1, v1, v3
@@ -2174,16 +2340,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB45_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2195,14 +2362,17 @@ define amdgpu_ps void @flat_or_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: flat_atomic_or_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB45_2
; GFX1250-GISEL-NEXT: .LBB45_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, v0, v4
@@ -2282,13 +2452,15 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_xor_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB50_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2307,9 +2479,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB50_2
; GFX1250-SDAG-NEXT: .LBB50_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
@@ -2324,15 +2498,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB50_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2346,13 +2521,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB50_2
; GFX1250-GISEL-NEXT: .LBB50_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
@@ -2378,11 +2556,13 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB51_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2401,8 +2581,11 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-SDAG-NEXT: .LBB51_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v3, v1, v3
@@ -2417,18 +2600,19 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB51_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2442,13 +2626,16 @@ define amdgpu_ps <2 x float> @flat_xor_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-GISEL-NEXT: .LBB51_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v2, v0, v4
@@ -2471,11 +2658,13 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB52_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2493,9 +2682,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-SDAG-NEXT: .LBB52_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
@@ -2507,13 +2698,14 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB52_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2525,14 +2717,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-GISEL-NEXT: .LBB52_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
@@ -2554,10 +2749,12 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB53_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2575,8 +2772,11 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-SDAG-NEXT: .LBB53_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_xor_b32_e32 v1, v1, v3
@@ -2588,16 +2788,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB53_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2609,14 +2810,17 @@ define amdgpu_ps void @flat_xor_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: flat_atomic_xor_b64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-GISEL-NEXT: .LBB53_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_xor_b32_e32 v0, v0, v4
@@ -2690,13 +2894,15 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_max_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB58_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2715,10 +2921,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-SDAG-NEXT: .LBB58_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3]
@@ -2732,15 +2940,16 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB58_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2753,15 +2962,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: .LBB58_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB58_2
; GFX1250-GISEL-NEXT: .LBB58_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5]
@@ -2786,11 +2998,13 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB59_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2809,9 +3023,12 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-SDAG-NEXT: .LBB59_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_i64 v[2:3], v[0:1], v[2:3]
@@ -2825,18 +3042,19 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB59_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2849,15 +3067,18 @@ define amdgpu_ps <2 x float> @flat_max_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: .LBB59_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB59_2
; GFX1250-GISEL-NEXT: .LBB59_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_i64 v[2:3], v[0:1], v[4:5]
@@ -2879,11 +3100,13 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB60_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2900,9 +3123,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB60_2
; GFX1250-SDAG-NEXT: .LBB60_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3]
@@ -2913,13 +3138,14 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB60_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -2930,14 +3156,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: .LBB60_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB60_2
; GFX1250-GISEL-NEXT: .LBB60_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5]
@@ -2958,10 +3187,12 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB61_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -2978,8 +3209,11 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB61_2
; GFX1250-SDAG-NEXT: .LBB61_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_i64 v[0:1], v[0:1], v[2:3]
@@ -2990,16 +3224,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB61_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3010,14 +3245,17 @@ define amdgpu_ps void @flat_max_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: .LBB61_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_i64 v0, v[4:5], s[2:3] offset:-128
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB61_2
; GFX1250-GISEL-NEXT: .LBB61_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_i64 v[0:1], v[0:1], v[4:5]
@@ -3090,13 +3328,15 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_min_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB66_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3115,10 +3355,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB66_2
; GFX1250-SDAG-NEXT: .LBB66_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3]
@@ -3132,15 +3374,16 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB66_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3153,15 +3396,18 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: .LBB66_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB66_2
; GFX1250-GISEL-NEXT: .LBB66_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5]
@@ -3186,11 +3432,13 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB67_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3209,9 +3457,12 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB67_2
; GFX1250-SDAG-NEXT: .LBB67_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_i64 v[2:3], v[0:1], v[2:3]
@@ -3225,18 +3476,19 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB67_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3249,15 +3501,18 @@ define amdgpu_ps <2 x float> @flat_min_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: .LBB67_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB67_2
; GFX1250-GISEL-NEXT: .LBB67_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_i64 v[2:3], v[0:1], v[4:5]
@@ -3279,11 +3534,13 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB68_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3300,9 +3557,11 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB68_2
; GFX1250-SDAG-NEXT: .LBB68_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3]
@@ -3313,13 +3572,14 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB68_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3330,14 +3590,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: .LBB68_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB68_2
; GFX1250-GISEL-NEXT: .LBB68_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5]
@@ -3358,10 +3621,12 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB69_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3378,8 +3643,11 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB69_2
; GFX1250-SDAG-NEXT: .LBB69_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_i64 v[0:1], v[0:1], v[2:3]
@@ -3390,16 +3658,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB69_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3410,14 +3679,17 @@ define amdgpu_ps void @flat_min_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: .LBB69_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_i64 v0, v[4:5], s[2:3] offset:-128
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB69_2
; GFX1250-GISEL-NEXT: .LBB69_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_i64 v[0:1], v[0:1], v[4:5]
@@ -3490,13 +3762,15 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_umax_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB74_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3515,10 +3789,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB74_2
; GFX1250-SDAG-NEXT: .LBB74_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3]
@@ -3532,15 +3808,16 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB74_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3553,15 +3830,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: .LBB74_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB74_2
; GFX1250-GISEL-NEXT: .LBB74_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5]
@@ -3586,11 +3866,13 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB75_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3609,9 +3891,12 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB75_2
; GFX1250-SDAG-NEXT: .LBB75_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_u64 v[2:3], v[0:1], v[2:3]
@@ -3625,18 +3910,19 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB75_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3649,15 +3935,18 @@ define amdgpu_ps <2 x float> @flat_umax_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: .LBB75_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB75_2
; GFX1250-GISEL-NEXT: .LBB75_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_u64 v[2:3], v[0:1], v[4:5]
@@ -3679,11 +3968,13 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB76_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3700,9 +3991,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB76_2
; GFX1250-SDAG-NEXT: .LBB76_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3]
@@ -3713,13 +4006,14 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB76_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3730,14 +4024,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: .LBB76_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB76_2
; GFX1250-GISEL-NEXT: .LBB76_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5]
@@ -3758,10 +4055,12 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB77_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3778,8 +4077,11 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB77_2
; GFX1250-SDAG-NEXT: .LBB77_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_max_u64 v[0:1], v[0:1], v[2:3]
@@ -3790,16 +4092,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB77_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3810,14 +4113,17 @@ define amdgpu_ps void @flat_umax_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: .LBB77_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_max_u64 v0, v[4:5], s[2:3] offset:-128
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB77_2
; GFX1250-GISEL-NEXT: .LBB77_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_max_u64 v[0:1], v[0:1], v[4:5]
@@ -3890,13 +4196,15 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-LABEL: flat_umin_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB82_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -3915,10 +4223,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB82_2
; GFX1250-SDAG-NEXT: .LBB82_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3]
@@ -3932,15 +4242,16 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB82_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -3953,15 +4264,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: .LBB82_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB82_2
; GFX1250-GISEL-NEXT: .LBB82_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5]
@@ -3986,11 +4300,13 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB83_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4009,9 +4325,12 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB83_2
; GFX1250-SDAG-NEXT: .LBB83_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_u64 v[2:3], v[0:1], v[2:3]
@@ -4025,18 +4344,19 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB83_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4049,15 +4369,18 @@ define amdgpu_ps <2 x float> @flat_umin_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: .LBB83_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB83_2
; GFX1250-GISEL-NEXT: .LBB83_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_u64 v[2:3], v[0:1], v[4:5]
@@ -4079,11 +4402,13 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB84_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4100,9 +4425,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB84_2
; GFX1250-SDAG-NEXT: .LBB84_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3]
@@ -4113,13 +4440,14 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB84_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4130,14 +4458,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: .LBB84_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB84_2
; GFX1250-GISEL-NEXT: .LBB84_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5]
@@ -4158,10 +4489,12 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB85_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4178,8 +4511,11 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB85_2
; GFX1250-SDAG-NEXT: .LBB85_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_min_u64 v[0:1], v[0:1], v[2:3]
@@ -4190,16 +4526,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB85_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4210,14 +4547,17 @@ define amdgpu_ps void @flat_umin_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %v
; GFX1250-GISEL-NEXT: .LBB85_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_min_u64 v0, v[4:5], s[2:3] offset:-128
; GFX1250-GISEL-NEXT: s_wait_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB85_2
; GFX1250-GISEL-NEXT: .LBB85_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_min_u64 v[0:1], v[0:1], v[4:5]
@@ -4310,14 +4650,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB90_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4338,9 +4680,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB90_2
; GFX1250-SDAG-NEXT: .LBB90_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
@@ -4356,15 +4700,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v0, v5
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB90_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4380,13 +4725,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn(ptr inreg %sbase, i32 %
; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB90_2
; GFX1250-GISEL-NEXT: .LBB90_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
@@ -4414,11 +4762,13 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[2:3], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB91_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4439,8 +4789,11 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB91_2
; GFX1250-SDAG-NEXT: .LBB91_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v2, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v8, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v8, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
@@ -4456,18 +4809,19 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v8, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v6, v3
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v7, v4
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v5
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v7, v4 :: v_dual_bitop2_b32 v0, s0, v3 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB91_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4483,13 +4837,16 @@ define amdgpu_ps <2 x float> @flat_cmpxchg_saddr_i64_rtn_neg128(ptr inreg %sbase
; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v[0:1], v5, v[6:9], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB91_2
; GFX1250-GISEL-NEXT: .LBB91_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
@@ -4512,13 +4869,15 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-LABEL: flat_cmpxchg_saddr_i64_nortn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v7, v2 :: v_dual_mov_b32 v6, v1
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v5, v4
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, v3
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB92_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4538,9 +4897,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB92_2
; GFX1250-SDAG-NEXT: .LBB92_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
@@ -4553,13 +4914,14 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB92_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4573,14 +4935,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn(ptr inreg %sbase, i32 %voffs
; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB92_2
; GFX1250-GISEL-NEXT: .LBB92_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
@@ -4603,10 +4968,12 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v2, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v2
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB93_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4626,8 +4993,11 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB93_2
; GFX1250-SDAG-NEXT: .LBB93_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v2, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
@@ -4640,16 +5010,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v9, v2
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB93_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4663,14 +5034,17 @@ define amdgpu_ps void @flat_cmpxchg_saddr_i64_nortn_neg128(ptr inreg %sbase, i32
; GFX1250-GISEL-NEXT: flat_atomic_cmpswap_b64 v0, v[6:9], s[2:3] offset:-128 scope:SCOPE_SYS
; GFX1250-GISEL-NEXT: s_wait_storecnt_dscnt 0x0
; GFX1250-GISEL-NEXT: global_inv scope:SCOPE_SYS
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr8_vgpr9
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB93_2
; GFX1250-GISEL-NEXT: .LBB93_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
@@ -4742,13 +5116,15 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_inc_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB98_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4766,15 +5142,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB98_2
; GFX1250-SDAG-NEXT: .LBB98_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1]
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -4786,15 +5163,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB98_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4806,21 +5184,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_branch .LBB98_5
; GFX1250-GISEL-NEXT: .LBB98_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB98_2
; GFX1250-GISEL-NEXT: .LBB98_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -4843,11 +5224,13 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB99_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4865,14 +5248,16 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB99_2
; GFX1250-SDAG-NEXT: .LBB99_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1]
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v3, 0, v5 :: v_dual_cndmask_b32 v2, 0, v4
; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -4884,18 +5269,19 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB99_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4907,21 +5293,24 @@ define amdgpu_ps <2 x float> @flat_inc_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_branch .LBB99_5
; GFX1250-GISEL-NEXT: .LBB99_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB99_2
; GFX1250-GISEL-NEXT: .LBB99_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[2:3], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -4941,11 +5330,13 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB100_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -4961,14 +5352,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB100_2
; GFX1250-SDAG-NEXT: .LBB100_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1]
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
@@ -4977,13 +5369,14 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB100_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -4993,20 +5386,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_endpgm
; GFX1250-GISEL-NEXT: .LBB100_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB100_2
; GFX1250-GISEL-NEXT: .LBB100_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
@@ -5025,10 +5421,12 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB101_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5044,13 +5442,15 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB101_2
; GFX1250-SDAG-NEXT: .LBB101_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], 1, v[0:1]
; GFX1250-SDAG-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX1250-SDAG-NEXT: v_dual_cndmask_b32 v1, 0, v5 :: v_dual_cndmask_b32 v0, 0, v4
; GFX1250-SDAG-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
@@ -5059,16 +5459,17 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB101_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -5078,20 +5479,23 @@ define amdgpu_ps void @flat_inc_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_endpgm
; GFX1250-GISEL-NEXT: .LBB101_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_inc_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB101_2
; GFX1250-GISEL-NEXT: .LBB101_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GFX1250-GISEL-NEXT: v_cmp_ge_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
@@ -5161,13 +5565,15 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-LABEL: flat_dec_saddr_i64_rtn:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[2:3], v[0:1]
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB106_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5185,10 +5591,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB106_2
; GFX1250-SDAG-NEXT: .LBB106_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5207,15 +5615,16 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB106_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -5227,15 +5636,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn(ptr inreg %sbase, i32 %voff
; GFX1250-GISEL-NEXT: s_branch .LBB106_5
; GFX1250-GISEL-NEXT: .LBB106_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB106_2
; GFX1250-GISEL-NEXT: .LBB106_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5265,11 +5677,13 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v0, s0, v5
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; GFX1250-SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v5
+; GFX1250-SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB107_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5287,9 +5701,12 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB107_2
; GFX1250-SDAG-NEXT: .LBB107_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s0, v4
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5308,18 +5725,19 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v4, v1
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v5, v2
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, v3
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, 0xffffff80, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v1, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_bitop2_b32 v0, s0, v7 bitop3:0x14
+; GFX1250-GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v7
+; GFX1250-GISEL-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB107_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -5331,15 +5749,18 @@ define amdgpu_ps <2 x float> @flat_dec_saddr_i64_rtn_neg128(ptr inreg %sbase, i3
; GFX1250-GISEL-NEXT: s_branch .LBB107_5
; GFX1250-GISEL-NEXT: .LBB107_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v[0:1], v3, v[4:5], s[2:3] offset:-128 th:TH_ATOMIC_RETURN scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6_vgpr7
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr6
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s1, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB107_2
; GFX1250-GISEL-NEXT: .LBB107_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v6, vcc_lo
; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v6
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v6, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5366,11 +5787,13 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB108_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5386,9 +5809,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB108_2
; GFX1250-SDAG-NEXT: .LBB108_4: ; %atomicrmw.private
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5404,13 +5829,14 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB108_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -5420,14 +5846,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn(ptr inreg %sbase, i32 %voffset,
; GFX1250-GISEL-NEXT: s_endpgm
; GFX1250-GISEL-NEXT: .LBB108_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB108_2
; GFX1250-GISEL-NEXT: .LBB108_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5453,10 +5882,12 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
; GFX1250-SDAG-NEXT: v_add_nc_u64_e32 v[0:1], s[0:1], v[0:1]
-; GFX1250-SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-NEXT: v_xor_b32_e32 v4, s0, v1
; GFX1250-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-SDAG-NEXT: v_cmpx_lt_u32_e32 0x3ffffff, v4
; GFX1250-SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-SDAG-NEXT: s_cbranch_execnz .LBB109_3
; GFX1250-SDAG-NEXT: ; %bb.1: ; %Flow
@@ -5472,8 +5903,11 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-SDAG-NEXT: s_cbranch_execz .LBB109_2
; GFX1250-SDAG-NEXT: .LBB109_4: ; %atomicrmw.private
+; GFX1250-SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
-; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v4, s0, v0
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
; GFX1250-SDAG-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
@@ -5489,16 +5923,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: s_mov_b64 s[0:1], src_private_base
-; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v2, v0
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, 0xffffff80, v1
; GFX1250-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_xor_b32_e32 v1, s0, v3
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-GISEL-NEXT: v_cmpx_le_u32_e32 0x4000000, v1
; GFX1250-GISEL-NEXT: s_xor_b32 s0, exec_lo, s0
; GFX1250-GISEL-NEXT: s_cbranch_execnz .LBB109_3
; GFX1250-GISEL-NEXT: ; %bb.1: ; %Flow
@@ -5508,14 +5943,17 @@ define amdgpu_ps void @flat_dec_saddr_i64_nortn_neg128(ptr inreg %sbase, i32 %vo
; GFX1250-GISEL-NEXT: s_endpgm
; GFX1250-GISEL-NEXT: .LBB109_3: ; %atomicrmw.global
; GFX1250-GISEL-NEXT: flat_atomic_dec_u64 v0, v[4:5], s[2:3] offset:-128 scope:SCOPE_DEV
-; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr2
; GFX1250-GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX1250-GISEL-NEXT: s_cbranch_execz .LBB109_2
; GFX1250-GISEL-NEXT: .LBB109_4: ; %atomicrmw.private
+; GFX1250-GISEL-NEXT: s_mov_b32 s0, src_flat_scratch_base_lo
; GFX1250-GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v2, vcc_lo
+; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s0, v2
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX1250-GISEL-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index b25d9b2..fc88839 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -3621,7 +3621,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-NEXT: s_mov_b32 s0, 0
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: s_movk_i32 s0, 0x3004
+; GFX9-NEXT: s_movk_i32 s0, 0x3000
+; GFX9-NEXT: s_add_i32 s0, s0, 4
; GFX9-NEXT: v_mov_b32_e32 v0, 15
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -3637,7 +3638,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; GFX10-NEXT: v_mov_b32_e32 v0, 13
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_movk_i32 s0, 0x3804
+; GFX10-NEXT: s_movk_i32 s0, 0x3800
+; GFX10-NEXT: s_add_i32 s0, s0, 4
; GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3682,7 +3684,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3004
+; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000
+; GFX9-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
@@ -3716,8 +3719,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX1010-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX1010-PAL-NEXT: v_mov_b32_e32 v0, 13
; GFX1010-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3800
; GFX1010-PAL-NEXT: s_mov_b32 s1, 0
-; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3804
+; GFX1010-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX1010-PAL-NEXT: scratch_store_dword off, v0, s1 offset:4
; GFX1010-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1010-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3739,7 +3743,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX1030-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX1030-PAL-NEXT: v_mov_b32_e32 v0, 13
; GFX1030-PAL-NEXT: v_mov_b32_e32 v1, 15
-; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3804
+; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3800
+; GFX1030-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX1030-PAL-NEXT: scratch_store_dword off, v0, off offset:4
; GFX1030-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1030-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3785,10 +3790,12 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-LABEL: store_load_large_imm_offset_foo:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3000
; GFX9-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-NEXT: s_add_i32 s1, s32, s0
; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: s_add_i32 s0, s32, 0x3004
+; GFX9-NEXT: s_add_i32 s0, s1, 4
; GFX9-NEXT: v_mov_b32_e32 v0, 15
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -3800,8 +3807,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-NEXT: s_movk_i32 s0, 0x3800
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_add_i32 s0, s32, 0x3804
+; GFX10-NEXT: s_add_i32 s1, s32, s0
+; GFX10-NEXT: s_add_i32 s0, s1, 4
; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3843,10 +3852,12 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-PAL-LABEL: store_load_large_imm_offset_foo:
; GFX9-PAL: ; %bb.0: ; %bb
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-PAL-NEXT: s_add_i32 s1, s32, s0
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-PAL-NEXT: s_add_i32 s0, s32, 0x3004
+; GFX9-PAL-NEXT: s_add_i32 s0, s1, 4
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
@@ -3872,8 +3883,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10-PAL: ; %bb.0: ; %bb
; GFX10-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-PAL-NEXT: s_movk_i32 s0, 0x3800
; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-PAL-NEXT: s_add_i32 s0, s32, 0x3804
+; GFX10-PAL-NEXT: s_add_i32 s1, s32, s0
+; GFX10-PAL-NEXT: s_add_i32 s0, s1, 4
; GFX10-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
index 7fad2f4..a88b1ec 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
@@ -75,7 +75,8 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_0
- ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 256, implicit-def $scc
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 256
+ ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]], implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 %stack.0
diff --git a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
index cc43142..2f2d727 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
@@ -46,7 +46,8 @@ body: |
%2:sreg_32 = S_LSHL2_ADD_U32 %0, %1, implicit-def $scc
...
# GCN-LABEL: name: test_frameindex{{$}}
-# GCN: %1:sreg_32 = S_ADD_I32 %stack.0, 70
+# GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 70
+# GCN-NEXT: %1:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]]
---
name: test_frameindex
tracksRegLiveness: true
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index f9a24fe..0cb2b0b 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -2102,23 +2102,10 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do
; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x24
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v2, s2
-; GFX1250-NEXT: s_mov_b32 s2, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_mov_b32_e32 v2, s2
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX1250-NEXT: s_cbranch_execnz .LBB51_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2148,24 +2135,9 @@ define double @local_atomic_fadd_f64_rtn(ptr addrspace(3) %ptr, double %data) {
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB52_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2197,24 +2169,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB53_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2246,24 +2205,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2295,24 +2241,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB55_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2341,23 +2274,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v2, v0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB56_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2387,24 +2306,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_unsafe(ptr addrspace(3) %ptr, doub
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB57_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2434,24 +2338,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_safe(ptr addrspace(3) %ptr, double
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB58_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 15cda62..f2fe61f 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -360,7 +360,8 @@ entry:
; s_add_i32.
; GCN-LABEL: {{^}}fi_sop2_s_add_u32_literal_error:
-; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0, 0x2010
+; GCN: s_movk_i32 [[S_MOVK_I32_:s[0-9]+]], 0x1000
+; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0x1010, [[S_MOVK_I32_]]
; GCN: s_addc_u32 [[ADD_HI:s[0-9]+]], s{{[0-9]+}}, 0
define amdgpu_kernel void @fi_sop2_s_add_u32_literal_error() #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
index 1c298014..3001248 100644
--- a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
+++ b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
@@ -6,16 +6,24 @@ define amdgpu_gfx [13 x i32] @issue130120() {
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_add_i32 s0, s32, 0xf0
-; CHECK-NEXT: s_add_i32 s1, s32, 0xf4
-; CHECK-NEXT: s_add_i32 s2, s32, 0xf8
-; CHECK-NEXT: s_add_i32 s3, s32, 0xfc
+; CHECK-NEXT: s_movk_i32 s1, 0xf4
+; CHECK-NEXT: s_movk_i32 s2, 0xf8
+; CHECK-NEXT: s_movk_i32 s3, 0xfc
+; CHECK-NEXT: s_movk_i32 s34, 0x100
; CHECK-NEXT: v_mov_b32_e32 v1, v0
-; CHECK-NEXT: s_add_i32 s34, s32, 0x100
-; CHECK-NEXT: s_add_i32 s35, s32, 0x104
-; CHECK-NEXT: s_add_i32 s36, s32, 0x108
-; CHECK-NEXT: s_add_i32 s37, s32, 0x110
-; CHECK-NEXT: s_add_i32 s38, s32, 0x120
+; CHECK-NEXT: s_movk_i32 s35, 0x104
+; CHECK-NEXT: s_movk_i32 s36, 0x108
+; CHECK-NEXT: s_movk_i32 s37, 0x110
+; CHECK-NEXT: s_movk_i32 s38, 0x120
+; CHECK-NEXT: s_add_i32 s0, s32, 0xf0
+; CHECK-NEXT: s_add_i32 s1, s32, s1
+; CHECK-NEXT: s_add_i32 s2, s32, s2
+; CHECK-NEXT: s_add_i32 s3, s32, s3
+; CHECK-NEXT: s_add_i32 s34, s32, s34
+; CHECK-NEXT: s_add_i32 s35, s32, s35
+; CHECK-NEXT: s_add_i32 s36, s32, s36
+; CHECK-NEXT: s_add_i32 s37, s32, s37
+; CHECK-NEXT: s_add_i32 s38, s32, s38
; CHECK-NEXT: s_or_b32 s39, s32, 4
; CHECK-NEXT: s_or_b32 s40, s32, 8
; CHECK-NEXT: s_or_b32 s41, s32, 12
diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll
index 768c972..98691d3 100644
--- a/llvm/test/CodeGen/AMDGPU/literal64.ll
+++ b/llvm/test/CodeGen/AMDGPU/literal64.ll
@@ -67,24 +67,8 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_load_b64 v[4:5], v[0:1], off
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_add_f64_e32 v[2:3], lit64(0x4063233333333333), v[4:5]
-; GCN-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
-; GCN-NEXT: s_wait_xcnt 0x0
-; GCN-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
-; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GCN-NEXT: s_cbranch_execnz .LBB6_1
-; GCN-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
+; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll
index 1c7c625..1bf865c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.gfx1250.w32.ll
@@ -2236,6 +2236,170 @@ bb:
ret void
}
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 %scale_src0, i32 %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[42:43], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v40, v41 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[42:43], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[42:43], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[42:43], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[42:43], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ss:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ss:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i32 %scale_src0, i32 1, i32 2, i32 %scale_src1, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i32 inreg %scale_src0, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: s_movk_i32 s1, 0x64
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_si_scale:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_mov_b32_e32 v42, 0x64
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s0, v42 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i32 %scale_src0, i32 0, i32 1, i32 100, i1 false, i1 true)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 %scale_src0, i64 %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[44:45], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], v[40:41], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[44:45], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[44:45], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[44:45], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[44:45], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ss(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ss:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 2, i32 1, i64 %scale_src0, i32 1, i32 2, i64 %scale_src1, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_si_scale(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, i64 inreg %scale_src0, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_si_scale:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], s[0:1], v[42:43] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 matrix_b_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> %C, i32 3, i32 2, i64 %scale_src0, i32 0, i32 1, i64 100, i1 false, i1 true)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16:
; GFX1250: ; %bb.0: ; %bb
@@ -2573,6 +2737,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>)
+declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1)
+declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1)
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1)
declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll
index e602c31..48303c0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imm.gfx1250.w32.ll
@@ -2530,6 +2530,312 @@ bb:
ret void
}
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i32 inreg %scale_src0, i32 inreg %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s0, s1 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 %scale_src0, i32 1, i32 0, i32 %scale_src1, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26
+; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26
+; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26
+; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26
+; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26
+; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26
+; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_splat:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: s_mov_b32 s0, 1.0
+; GISEL-NEXT: s_mov_b32 s1, 2.0
+; GISEL-NEXT: s_mov_b32 s14, s0
+; GISEL-NEXT: s_mov_b32 s15, s0
+; GISEL-NEXT: s_mov_b32 s2, s0
+; GISEL-NEXT: s_mov_b32 s3, s0
+; GISEL-NEXT: s_mov_b32 s4, s0
+; GISEL-NEXT: s_mov_b32 s5, s0
+; GISEL-NEXT: s_mov_b32 s6, s0
+; GISEL-NEXT: s_mov_b32 s7, s0
+; GISEL-NEXT: s_mov_b32 s8, s0
+; GISEL-NEXT: s_mov_b32 s9, s0
+; GISEL-NEXT: s_mov_b32 s10, s0
+; GISEL-NEXT: s_mov_b32 s11, s0
+; GISEL-NEXT: s_mov_b32 s12, s0
+; GISEL-NEXT: s_mov_b32 s13, s0
+; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15]
+; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11]
+; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9]
+; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7]
+; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5]
+; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i32 1, i32 1, i32 0, i32 2, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000
+; GFX1250-NEXT: s_movk_i32 s0, 0x65
+; GFX1250-NEXT: s_movk_i32 s1, 0x64
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26
+; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26
+; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26
+; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26
+; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26
+; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26
+; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26
+; GFX1250-NEXT: v_mov_b32_e32 v41, v26
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s1, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_non_inlineable:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: s_mov_b32 s0, 0x40400000
+; GISEL-NEXT: v_mov_b32_e32 v42, 0x64
+; GISEL-NEXT: s_mov_b32 s14, s0
+; GISEL-NEXT: s_mov_b32 s15, s0
+; GISEL-NEXT: s_mov_b32 s1, s0
+; GISEL-NEXT: s_mov_b32 s2, s0
+; GISEL-NEXT: s_mov_b32 s3, s0
+; GISEL-NEXT: s_mov_b32 s4, s0
+; GISEL-NEXT: s_mov_b32 s5, s0
+; GISEL-NEXT: s_mov_b32 s6, s0
+; GISEL-NEXT: s_mov_b32 s7, s0
+; GISEL-NEXT: s_mov_b32 s8, s0
+; GISEL-NEXT: s_mov_b32 s9, s0
+; GISEL-NEXT: s_mov_b32 s10, s0
+; GISEL-NEXT: s_mov_b32 s11, s0
+; GISEL-NEXT: s_mov_b32 s12, s0
+; GISEL-NEXT: s_mov_b32 s13, s0
+; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15]
+; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11]
+; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9]
+; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7]
+; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5]
+; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
+; GISEL-NEXT: v_mov_b32_e32 v43, 0x65
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v42, v43 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i32 100, i32 1, i32 0, i32 101, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4(<16 x i32> %A, <8 x i32> %B, i64 inreg %scale_src0, i64 inreg %scale_src1, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], 1.0, s[0:1], s[2:3] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 %scale_src0, i32 1, i32 0, i64 %scale_src1, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_splat(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_dual_mov_b32 v26, 1.0 :: v_dual_mov_b32 v27, 2.0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mov_b32 v28, v26 :: v_dual_mov_b32 v29, v26
+; GFX1250-NEXT: v_dual_mov_b32 v30, v26 :: v_dual_mov_b32 v31, v26
+; GFX1250-NEXT: v_dual_mov_b32 v32, v26 :: v_dual_mov_b32 v33, v26
+; GFX1250-NEXT: v_dual_mov_b32 v34, v26 :: v_dual_mov_b32 v35, v26
+; GFX1250-NEXT: v_dual_mov_b32 v36, v26 :: v_dual_mov_b32 v37, v26
+; GFX1250-NEXT: v_dual_mov_b32 v38, v26 :: v_dual_mov_b32 v39, v26
+; GFX1250-NEXT: v_dual_mov_b32 v40, v26 :: v_dual_mov_b32 v41, v26
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_splat:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: s_mov_b32 s0, 1.0
+; GISEL-NEXT: s_mov_b32 s1, 2.0
+; GISEL-NEXT: s_mov_b32 s14, s0
+; GISEL-NEXT: s_mov_b32 s15, s0
+; GISEL-NEXT: s_mov_b32 s2, s0
+; GISEL-NEXT: s_mov_b32 s3, s0
+; GISEL-NEXT: s_mov_b32 s4, s0
+; GISEL-NEXT: s_mov_b32 s5, s0
+; GISEL-NEXT: s_mov_b32 s6, s0
+; GISEL-NEXT: s_mov_b32 s7, s0
+; GISEL-NEXT: s_mov_b32 s8, s0
+; GISEL-NEXT: s_mov_b32 s9, s0
+; GISEL-NEXT: s_mov_b32 s10, s0
+; GISEL-NEXT: s_mov_b32 s11, s0
+; GISEL-NEXT: s_mov_b32 s12, s0
+; GISEL-NEXT: s_mov_b32 s13, s0
+; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15]
+; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11]
+; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9]
+; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7]
+; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5]
+; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], 1, 2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 1.0, float 2.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, i32 1, i32 0, i64 1, i32 1, i32 0, i64 2, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_non_inlineable(<16 x i32> %A, <8 x i32> %B, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_mov_b32_e32 v26, 0x40400000
+; GFX1250-NEXT: s_mov_b64 s[0:1], 0x65
+; GFX1250-NEXT: s_mov_b64 s[2:3], 0x64
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_mov_b32 v27, v26 :: v_dual_mov_b32 v28, v26
+; GFX1250-NEXT: v_dual_mov_b32 v29, v26 :: v_dual_mov_b32 v30, v26
+; GFX1250-NEXT: v_dual_mov_b32 v31, v26 :: v_dual_mov_b32 v32, v26
+; GFX1250-NEXT: v_dual_mov_b32 v33, v26 :: v_dual_mov_b32 v34, v26
+; GFX1250-NEXT: v_dual_mov_b32 v35, v26 :: v_dual_mov_b32 v36, v26
+; GFX1250-NEXT: v_dual_mov_b32 v37, v26 :: v_dual_mov_b32 v38, v26
+; GFX1250-NEXT: v_dual_mov_b32 v39, v26 :: v_dual_mov_b32 v40, v26
+; GFX1250-NEXT: v_mov_b32_e32 v41, v26
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], s[2:3], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GFX1250-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GFX1250-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GFX1250-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_non_inlineable:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: s_mov_b32 s0, 0x40400000
+; GISEL-NEXT: v_mov_b64_e32 v[42:43], 0x64
+; GISEL-NEXT: s_mov_b32 s14, s0
+; GISEL-NEXT: s_mov_b32 s15, s0
+; GISEL-NEXT: s_mov_b32 s1, s0
+; GISEL-NEXT: s_mov_b32 s2, s0
+; GISEL-NEXT: s_mov_b32 s3, s0
+; GISEL-NEXT: s_mov_b32 s4, s0
+; GISEL-NEXT: s_mov_b32 s5, s0
+; GISEL-NEXT: s_mov_b32 s6, s0
+; GISEL-NEXT: s_mov_b32 s7, s0
+; GISEL-NEXT: s_mov_b32 s8, s0
+; GISEL-NEXT: s_mov_b32 s9, s0
+; GISEL-NEXT: s_mov_b32 s10, s0
+; GISEL-NEXT: s_mov_b32 s11, s0
+; GISEL-NEXT: s_mov_b32 s12, s0
+; GISEL-NEXT: s_mov_b32 s13, s0
+; GISEL-NEXT: v_mov_b64_e32 v[40:41], s[14:15]
+; GISEL-NEXT: v_mov_b64_e32 v[38:39], s[12:13]
+; GISEL-NEXT: v_mov_b64_e32 v[36:37], s[10:11]
+; GISEL-NEXT: v_mov_b64_e32 v[34:35], s[8:9]
+; GISEL-NEXT: v_mov_b64_e32 v[32:33], s[6:7]
+; GISEL-NEXT: v_mov_b64_e32 v[30:31], s[4:5]
+; GISEL-NEXT: v_mov_b64_e32 v[28:29], s[2:3]
+; GISEL-NEXT: v_mov_b64_e32 v[26:27], s[0:1]
+; GISEL-NEXT: v_mov_b64_e32 v[44:45], 0x65
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[26:41], v[0:15], v[16:23], v[26:41], v[42:43], v[44:45] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[24:25], v[26:29], off
+; GISEL-NEXT: global_store_b128 v[24:25], v[30:33], off offset:16
+; GISEL-NEXT: global_store_b128 v[24:25], v[34:37], off offset:32
+; GISEL-NEXT: global_store_b128 v[24:25], v[38:41], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 0, <16 x float> <float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0, float 3.0>, i32 1, i32 0, i64 100, i32 1, i32 0, i64 101, i1 true, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x4.f32.v8f32.v2f32(i1, <2 x float>, i1, <2 x float>, i16, <8 x float>, i1, i1)
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.bf16.v8f32.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x float>, i1, i1)
declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x32.bf16.v8bf16.v16bf16(i1, <16 x bfloat>, i1, <16 x bfloat>, i16, <8 x bfloat>, i1, i1)
@@ -2557,3 +2863,5 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>)
+declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1)
+declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll
index 14699ce..8f674f8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wmma.imod.gfx1250.w32.ll
@@ -1882,6 +1882,162 @@ bb:
ret void
}
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_negC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1]
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_negC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1]
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_neg_absC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale_f32_32x16x128_f4_ignoreC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 4, <16 x float> %C, i32 1, i32 0, i32 2, i32 1, i32 0, i32 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_negC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1]
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_negC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1]
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 1, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_neg_absC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_neg_absC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 3, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_wmma_scale16_f32_32x16x128_f4_ignoreC(<16 x i32> %A, <8 x i32> %B, <16 x float> %C, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC:
+; GFX1250: ; %bb.0: ; %bb
+; GFX1250-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GFX1250-NEXT: s_clause 0x3
+; GFX1250-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GFX1250-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GFX1250-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GFX1250-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GFX1250-NEXT: s_endpgm
+;
+; GISEL-LABEL: test_wmma_scale16_f32_32x16x128_f4_ignoreC:
+; GISEL: ; %bb.0: ; %bb
+; GISEL-NEXT: v_wmma_scale16_f32_32x16x128_f4 v[24:39], v[0:15], v[16:23], v[24:39], 2, 4 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1
+; GISEL-NEXT: s_clause 0x3
+; GISEL-NEXT: global_store_b128 v[40:41], v[24:27], off
+; GISEL-NEXT: global_store_b128 v[40:41], v[28:31], off offset:16
+; GISEL-NEXT: global_store_b128 v[40:41], v[32:35], off offset:32
+; GISEL-NEXT: global_store_b128 v[40:41], v[36:39], off offset:48
+; GISEL-NEXT: s_endpgm
+bb:
+ %res = call <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32> %A, <8 x i32> %B, i16 4, <16 x float> %C, i32 1, i32 0, i64 2, i32 1, i32 0, i64 4, i1 false, i1 false)
+ store <16 x float> %res, ptr addrspace(1) %out
+ ret void
+}
+
define amdgpu_ps void @test_swmmac_f32_16x16x64_bf16_negA(<16 x bfloat> %A, <32 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX1250-LABEL: test_swmmac_f32_16x16x64_bf16_negA:
; GFX1250: ; %bb.0: ; %bb
@@ -2177,6 +2333,8 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.fp8.bf8.v8f32.v16i32(<16 x i
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.fp8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x128.bf8.bf8.v8f32.v16i32(<16 x i32>, <16 x i32>, i16, <8 x float>, i1, i1)
declare <16 x float> @llvm.amdgcn.wmma.f32.32x16x128.f4.v16i32.v8i32.v16f32(<16 x i32>, <8 x i32>, i16, <16 x float>)
+declare <16 x float> @llvm.amdgcn.wmma.scale.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i32, i32, i32, i32, i1, i1)
+declare <16 x float> @llvm.amdgcn.wmma.scale16.f32.32x16x128.f4.v16f32.v16i32.v8i32(<16 x i32>, <8 x i32>, i16, <16 x float>, i32, i32, i64, i32, i32, i64, i1, i1)
declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x64.bf16.v8f32.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x float>, i16, i1, i1)
declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x64.bf16.v8bf16.v16bf16.v32bf16.i16(i1, <16 x bfloat>, i1, <32 x bfloat>, <8 x bfloat>, i16, i1, i1)
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index a3ebaec..5f0ca7b 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -74,7 +74,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(ptr addrspace(1) %out) {
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_movk_i32 s0, 0x5000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_addk_i32 s0, 0x3000
; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], off, s0 offset:208 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_movk_i32 s0, 0x3000
@@ -175,7 +176,9 @@ define void @func_local_stack_offset_uses_sp(ptr addrspace(1) %out) {
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_add_i32 s0, s33, 0x5000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_add_i32 s1, s33, s0
+; FLATSCR-NEXT: s_add_i32 s0, s1, 0x3000
; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s0 offset:208 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_add_i32 s0, s33, 0x3000
@@ -223,30 +226,35 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: s_cbranch_scc1 .LBB2_1
; MUBUF-NEXT: ; %bb.2: ; %split
+; MUBUF-NEXT: s_movk_i32 s5, 0x12d4
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d4, v1
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
+; MUBUF-NEXT: s_movk_i32 s5, 0x12d0
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
; MUBUF-NEXT: s_movk_i32 s4, 0x4000
; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d0, v1
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
+; MUBUF-NEXT: s_movk_i32 s5, 0x12c4
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0
; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12c4, v1
-; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
; MUBUF-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_mov_b32_e32 v0, s4
-; MUBUF-NEXT: v_or_b32_e32 v2, 0x12cc, v3
+; MUBUF-NEXT: s_movk_i32 s4, 0x12cc
+; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000
+; MUBUF-NEXT: v_or_b32_e32 v2, s4, v3
+; MUBUF-NEXT: s_movk_i32 s4, 0x12c8
; MUBUF-NEXT: v_mov_b32_e32 v6, 0x4000
; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_mov_b32_e32 v7, 0x4000
; MUBUF-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v2, 0x12c8, v6
+; MUBUF-NEXT: v_or_b32_e32 v2, s4, v6
; MUBUF-NEXT: v_mov_b32_e32 v8, 0x4000
; MUBUF-NEXT: v_mov_b32_e32 v9, 0x4000
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen glc
@@ -298,7 +306,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_movk_i32 s0, 0x3000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x1000
+; FLATSCR-NEXT: s_addk_i32 s0, 0x2000
; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc
diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
new file mode 100644
index 0000000..6d0aa1e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
@@ -0,0 +1,108 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s
+
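+; Check that an immediate is not folded into an instruction that also
+; references a frame index; the frame offsets below must be combined in
+; registers instead.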
+define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %val4, <16 x i64> %val16) {
+; CHECK-LABEL: no_folding_imm_to_inst_with_fi:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: s_load_b256 s[36:43], s[4:5], 0x24
+; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4
+; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4
+; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base
+; CHECK-NEXT: s_movk_i32 s33, 0x70
+; CHECK-NEXT: s_movk_i32 s34, 0x60
+; CHECK-NEXT: s_or_b32 s44, 0x80, s33
+; CHECK-NEXT: s_mov_b32 s45, s35
+; CHECK-NEXT: s_or_b32 s46, 0x80, s34
+; CHECK-NEXT: s_mov_b32 s47, s35
+; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45
+; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47
+; CHECK-NEXT: s_movk_i32 s34, 0x80
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41
+; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37
+; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39
+; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21
+; CHECK-NEXT: s_movk_i32 s20, 0x50
+; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29
+; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_or_b32 s20, 0x80, s20
+; CHECK-NEXT: s_mov_b32 s21, s35
+; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20
+; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
+; CHECK-NEXT: s_or_b32 s16, 0x80, 64
+; CHECK-NEXT: s_mov_b32 s17, s35
+; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
+; CHECK-NEXT: s_or_b32 s12, 0x80, 48
+; CHECK-NEXT: s_mov_b32 s13, s35
+; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
+; CHECK-NEXT: s_or_b32 s8, 0x80, 32
+; CHECK-NEXT: s_mov_b32 s9, s35
+; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
+; CHECK-NEXT: s_or_b32 s4, 0x80, 16
+; CHECK-NEXT: s_mov_b32 s5, s35
+; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16
+; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15
+; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12
+; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8
+; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4
+; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7
+; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1
+; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3
+; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: s_endpgm
+bb:
+ %alloca = alloca <4 x i64>, align 32, addrspace(5)
+ %alloca1 = alloca <16 x i64>, align 128, addrspace(5)
+ store volatile <4 x i64> %val4, ptr addrspace(5) %alloca
+ %ascast = addrspacecast ptr addrspace(5) %alloca1 to ptr
+ store volatile <16 x i64> %val16, ptr %ascast
+ %load = load volatile <16 x i64>, ptr %ascast
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
index 131c5f3..f67cbe3 100644
--- a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
+++ b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
@@ -10,6 +10,8 @@
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GETREG,GETREG-GISEL -check-prefix=GCN %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s
declare i64 @llvm.readcyclecounter() #0
@@ -21,6 +23,7 @@ declare i64 @llvm.readcyclecounter() #0
; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]]
; GFX12: s_cselect_b32 {{s[0-9]+}}, [[LO1]], 0
+; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}}
; GCN-DAG: kmcnt
; MEMTIME: store_dwordx2
; SIVI-NOT: kmcnt
@@ -53,6 +56,7 @@ define amdgpu_kernel void @test_readcyclecounter(ptr addrspace(1) %out) #0 {
; GFX12: s_getreg_b32 [[HI1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
; GFX12: s_getreg_b32 [[LO1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_LO)
; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
+; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}}
; GCN-DAG: s_load_{{dword|b32|b64}}
; GETREG-DAG: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_SHADER_CYCLES, 0, 20)
; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]]
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
index 735720a..725d57d 100644
--- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
@@ -285,7 +285,7 @@ define amdgpu_ps void @flat_store_b32_idxprom(ptr align 4 inreg %p, i32 %idx) {
; GCN-LABEL: flat_store_b32_idxprom:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b32_e32 v1, 1.0
-; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: flat_store_b32 v0, v1, s[0:1] scale_offset scope:SCOPE_SE
; GCN-NEXT: s_endpgm
entry:
%idxprom = sext i32 %idx to i64
@@ -298,7 +298,7 @@ define amdgpu_ps void @flat_store_b16_idxprom(ptr align 2 inreg %p, i32 %idx) {
; GCN-LABEL: flat_store_b16_idxprom:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b32_e32 v1, 1
-; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset
+; GCN-NEXT: flat_store_b16 v0, v1, s[0:1] scale_offset scope:SCOPE_SE
; GCN-NEXT: s_endpgm
entry:
%idxprom = sext i32 %idx to i64
@@ -311,7 +311,7 @@ define amdgpu_ps void @flat_store_b64_idxprom(ptr align 4 inreg %p, i32 %idx) {
; GCN-LABEL: flat_store_b64_idxprom:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b64_e32 v[2:3], 1.0
-; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset
+; GCN-NEXT: flat_store_b64 v0, v[2:3], s[0:1] scale_offset scope:SCOPE_SE
; GCN-NEXT: s_endpgm
entry:
%idxprom = sext i32 %idx to i64
@@ -337,12 +337,15 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom:
; SDAG: ; %bb.0: ; %entry
; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1]
-; SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
-; SDAG-NEXT: s_mov_b32 s0, exec_lo
+; SDAG-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; SDAG-NEXT: v_xor_b32_e32 v0, s0, v3
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; SDAG-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v0
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
+; SDAG-NEXT: s_and_saveexec_b32 s0, vcc_lo
; SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; SDAG-NEXT: s_cbranch_execnz .LBB21_3
; SDAG-NEXT: ; %bb.1: ; %Flow
@@ -360,13 +363,16 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; SDAG-NEXT: s_cbranch_execz .LBB21_2
; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
+; SDAG-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; SDAG-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[2:3]
-; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v2, vcc_lo
; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v2
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; SDAG-NEXT: s_wait_loadcnt 0x0
; SDAG-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
-; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
+; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; SDAG-NEXT: s_wait_xcnt 0x0
; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; SDAG-NEXT: s_branch .LBB21_5
@@ -374,19 +380,21 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
;
; GISEL-LABEL: flat_atomicrmw_b64_rtn_idxprom:
; GISEL: ; %bb.0: ; %entry
+; GISEL-NEXT: s_mov_b32 s2, src_flat_scratch_base_hi
; GISEL-NEXT: v_mov_b32_e32 v2, v0
; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
-; GISEL-NEXT: s_mov_b64 s[2:3], src_private_base
-; GISEL-NEXT: s_mov_b32 s2, exec_lo
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GISEL-NEXT: v_lshlrev_b64_e32 v[0:1], 3, v[2:3]
; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v4, v0
; GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v5, v1, vcc_lo
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GISEL-NEXT: v_xor_b32_e32 v0, s2, v5
+; GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 0x4000000, v0
; GISEL-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GISEL-NEXT: v_cmpx_ne_u32_e64 s3, v5
+; GISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GISEL-NEXT: s_xor_b32 s2, exec_lo, s2
; GISEL-NEXT: s_cbranch_execnz .LBB21_3
; GISEL-NEXT: ; %bb.1: ; %Flow
@@ -398,19 +406,22 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; GISEL-NEXT: s_branch .LBB21_5
; GISEL-NEXT: .LBB21_3: ; %atomicrmw.global
; GISEL-NEXT: v_mov_b64_e32 v[0:1], 1
-; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GISEL-NEXT: ; implicit-def: $vgpr4
; GISEL-NEXT: flat_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GISEL-NEXT: s_wait_xcnt 0x0
; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
; GISEL-NEXT: s_cbranch_execz .LBB21_2
; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
+; GISEL-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
+; GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GISEL-NEXT: s_wait_loadcnt 0x0
; GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
-; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
+; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GISEL-NEXT: s_wait_xcnt 0x0
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: s_branch .LBB21_5
diff --git a/llvm/test/CodeGen/ARM/bad-constraint.ll b/llvm/test/CodeGen/ARM/bad-constraint.ll
index 9b8fcd5..7d80f0c 100644
--- a/llvm/test/CodeGen/ARM/bad-constraint.ll
+++ b/llvm/test/CodeGen/ARM/bad-constraint.ll
@@ -1,6 +1,7 @@
; RUN: not llc -filetype=obj %s -o /dev/null 2>&1 | FileCheck %s
; CHECK: error: couldn't allocate input reg for constraint '{d2}'
; CHECK-NEXT: error: couldn't allocate input reg for constraint '{s2}'
+; CHECK-NEXT: error: couldn't allocate input reg for constraint '{d3}'
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv8a-unknown-linux-gnueabihf"
@@ -23,3 +24,8 @@ entry:
ret void
}
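+; A 128-bit <16 x i8> splat does not fit the 64-bit d3 register, so this
+; constraint must also be diagnosed with an allocation error.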
+define void @_Z1dv() local_unnamed_addr {
+entry:
+ tail call void asm sideeffect "", "{d3}"(<16 x i8> splat (i8 -1))
+ ret void
+}
diff --git a/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll
new file mode 100644
index 0000000..0c01bb9
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/inlineasm-vec-to-double.ll
@@ -0,0 +1,14 @@
+; RUN: llc %s -filetype=asm -o - | FileCheck %s
+
+; CHECK: vmov.i8 d3, #0xff
+
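+; The 64-bit <8 x i8> splat fits the 64-bit d3 register, so the constraint
+; is satisfied with a single vmov.i8, unlike the 128-bit <16 x i8> splat
+; rejected in bad-constraint.ll.
+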
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8a-unknown-linux-gnueabihf"
+
+; Function Attrs: mustprogress noimplicitfloat nounwind
+define void @cvt_vec() local_unnamed_addr {
+entry:
+ tail call void asm sideeffect "", "{d3}"(<8 x i8> splat (i8 -1))
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll
new file mode 100644
index 0000000..7c0813b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -dxil-forward-handle-accesses %s | FileCheck %s
+
+%"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i32, 1, 0) }
+@global = internal unnamed_addr global %"class.hlsl::RWStructuredBuffer" poison, align 4
+@name = private unnamed_addr constant [5 x i8] c"dest\00", align 1
+
+
+; NOTE: The intent of this test is to confirm that a load of
+; target("dx.RawBuffer", i32, 1, 0) is replaced with a call to
+; @llvm.dx.resource.getpointer.
+define void @CSMain() local_unnamed_addr {
+; CHECK-LABEL: define void @CSMain() local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[AGG_TMP_I1_SROA_0:%.*]] = alloca target("dx.RawBuffer", i32, 1, 0), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name)
+; CHECK-NEXT: store target("dx.RawBuffer", i32, 1, 0) [[TMP0]], ptr @global, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @global, align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[AGG_TMP_I1_SROA_0]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) [[TMP0]], i32 0)
+; CHECK-NEXT: store i32 0, ptr [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %alloca = alloca target("dx.RawBuffer", i32, 1, 0), align 8
+ %handle = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name)
+ store target("dx.RawBuffer", i32, 1, 0) %handle, ptr @global, align 4
+ %val = load i32, ptr @global, align 4
+ store i32 %val, ptr %alloca, align 8
+ %indirect = load target("dx.RawBuffer", i32, 1, 0), ptr %alloca, align 8
+ %buff = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) %indirect, i32 0)
+ store i32 0, ptr %buff, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/Generic/allow-check.ll b/llvm/test/CodeGen/Generic/allow-check.ll
index 148ee81..97719a7 100644
--- a/llvm/test/CodeGen/Generic/allow-check.ll
+++ b/llvm/test/CodeGen/Generic/allow-check.ll
@@ -6,6 +6,7 @@
; XFAIL: target=nvptx{{.*}}
; XFAIL: target=sparc{{.*}}
; XFAIL: target=hexagon-{{.*}}
+; XFAIL: target=arm64ec-{{.*}}
; RUN: llc < %s -O3 -global-isel=0 -fast-isel=0
; RUN: llc < %s -O3 -global-isel=1 -fast-isel=0
diff --git a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
index 3800712..f0277a7 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/fpowi.ll
@@ -11,16 +11,16 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 1
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -29,8 +29,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 2
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -39,8 +39,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 2
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 3
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -49,8 +49,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 3
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 4
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -59,8 +59,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 4
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 5
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -69,8 +69,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 5
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 6
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -79,8 +79,8 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6
; CHECK-NEXT: xvst $xr1, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7
-; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: xvpickve.w $xr0, $xr0, 7
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -107,16 +107,16 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
-; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 1
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
-; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -125,8 +125,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
-; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 2
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
@@ -135,8 +135,8 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2
; CHECK-NEXT: xvst $xr1, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
-; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: xvpickve.d $xr0, $xr0, 3
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $xr0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
index 221aba3..8ee567c 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/fix-xvshuf.ll
@@ -6,12 +6,12 @@
define <4 x double> @shufflevector_v4f64(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: shufflevector_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 2
-; CHECK-NEXT: xvpickve2gr.d $a1, $xr0, 3
-; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1
-; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 2
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr1, 3
-; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3
+; CHECK-NEXT: xvpickve.d $xr2, $xr1, 2
+; CHECK-NEXT: xvpickve.d $xr3, $xr0, 3
+; CHECK-NEXT: xvinsve0.d $xr0, $xr2, 1
+; CHECK-NEXT: xvinsve0.d $xr0, $xr3, 2
+; CHECK-NEXT: xvpickve.d $xr1, $xr1, 3
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 3
; CHECK-NEXT: ret
entry:
%c = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 6, i32 3, i32 7>
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll
index 271e3ec..ac5a214 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/insert-extract-element.ll
@@ -42,8 +42,8 @@ entry:
define <8 x float> @insert_extract_v8f32(<8 x float> %a) nounwind {
; CHECK-LABEL: insert_extract_v8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7
-; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1
+; CHECK-NEXT: xvpickve.w $xr1, $xr0, 7
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 1
; CHECK-NEXT: ret
entry:
%b = extractelement <8 x float> %a, i32 7
@@ -66,8 +66,8 @@ entry:
define <4 x double> @insert_extract_v4f64(<4 x double> %a) nounwind {
; CHECK-LABEL: insert_extract_v4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
-; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1
+; CHECK-NEXT: xvpickve.d $xr1, $xr0, 3
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
; CHECK-NEXT: ret
entry:
%b = extractelement <4 x double> %a, i32 3
diff --git a/llvm/test/CodeGen/NVPTX/sext-setcc.ll b/llvm/test/CodeGen/NVPTX/sext-setcc.ll
index 9a67bdf..97918a6 100644
--- a/llvm/test/CodeGen/NVPTX/sext-setcc.ll
+++ b/llvm/test/CodeGen/NVPTX/sext-setcc.ll
@@ -29,7 +29,6 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) {
; CHECK-LABEL: sext_setcc_v4i1_to_v4i8(
; CHECK: {
; CHECK-NEXT: .reg .pred %p<5>;
-; CHECK-NEXT: .reg .b16 %rs<5>;
; CHECK-NEXT: .reg .b32 %r<13>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
@@ -37,17 +36,13 @@ define <4 x i8> @sext_setcc_v4i1_to_v4i8(ptr %p) {
; CHECK-NEXT: ld.param.b64 %rd1, [sext_setcc_v4i1_to_v4i8_param_0];
; CHECK-NEXT: ld.b32 %r1, [%rd1];
; CHECK-NEXT: prmt.b32 %r2, %r1, 0, 0x7770U;
-; CHECK-NEXT: cvt.u16.u32 %rs1, %r2;
-; CHECK-NEXT: setp.eq.b16 %p1, %rs1, 0;
+; CHECK-NEXT: setp.eq.b32 %p1, %r2, 0;
; CHECK-NEXT: prmt.b32 %r3, %r1, 0, 0x7771U;
-; CHECK-NEXT: cvt.u16.u32 %rs2, %r3;
-; CHECK-NEXT: setp.eq.b16 %p2, %rs2, 0;
+; CHECK-NEXT: setp.eq.b32 %p2, %r3, 0;
; CHECK-NEXT: prmt.b32 %r4, %r1, 0, 0x7772U;
-; CHECK-NEXT: cvt.u16.u32 %rs3, %r4;
-; CHECK-NEXT: setp.eq.b16 %p3, %rs3, 0;
+; CHECK-NEXT: setp.eq.b32 %p3, %r4, 0;
; CHECK-NEXT: prmt.b32 %r5, %r1, 0, 0x7773U;
-; CHECK-NEXT: cvt.u16.u32 %rs4, %r5;
-; CHECK-NEXT: setp.eq.b16 %p4, %rs4, 0;
+; CHECK-NEXT: setp.eq.b32 %p4, %r5, 0;
; CHECK-NEXT: selp.b32 %r6, -1, 0, %p4;
; CHECK-NEXT: selp.b32 %r7, -1, 0, %p3;
; CHECK-NEXT: prmt.b32 %r8, %r7, %r6, 0x3340U;
diff --git a/llvm/test/CodeGen/NVPTX/trunc-setcc.ll b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll
new file mode 100644
index 0000000..f22e37e
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/trunc-setcc.ll
@@ -0,0 +1,269 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_50 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mcpu=sm_50 | %ptxas-verify -arch=sm_50 %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+define i1 @trunc_nsw_signed_const(i32 %a) {
+; CHECK-LABEL: trunc_nsw_signed_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_signed_const_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: setp.gt.s32 %p1, %r2, -1;
+; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nsw i32 %a2 to i8
+ %c = icmp sgt i8 %b, -1
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_signed_const(i32 %a) {
+; CHECK-LABEL: trunc_nuw_signed_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_signed_const_param_0];
+; CHECK-NEXT: add.s16 %rs2, %rs1, 1;
+; CHECK-NEXT: cvt.s16.s8 %rs3, %rs2;
+; CHECK-NEXT: setp.lt.s16 %p1, %rs3, 100;
+; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nuw i32 %a2 to i8
+ %c = icmp slt i8 %b, 100
+ ret i1 %c
+}
+
+define i1 @trunc_nsw_unsigned_const(i32 %a) {
+; CHECK-LABEL: trunc_nsw_unsigned_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsigned_const_param_0];
+; CHECK-NEXT: add.s16 %rs2, %rs1, 1;
+; CHECK-NEXT: and.b16 %rs3, %rs2, 255;
+; CHECK-NEXT: setp.lt.u16 %p1, %rs3, 236;
+; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nsw i32 %a2 to i8
+ %c = icmp ult i8 %b, -20
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_unsigned_const(i32 %a) {
+; CHECK-LABEL: trunc_nuw_unsigned_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsigned_const_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: setp.gt.u32 %p1, %r2, 100;
+; CHECK-NEXT: selp.b32 %r3, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nuw i32 %a2 to i8
+ %c = icmp ugt i8 %b, 100
+ ret i1 %c
+}
+
+
+define i1 @trunc_nsw_eq_const(i32 %a) {
+; CHECK-LABEL: trunc_nsw_eq_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_const_param_0];
+; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99;
+; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nsw i32 %a2 to i8
+ %c = icmp eq i8 %b, 100
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_eq_const(i32 %a) {
+; CHECK-LABEL: trunc_nuw_eq_const(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_const_param_0];
+; CHECK-NEXT: setp.eq.b32 %p1, %r1, 99;
+; CHECK-NEXT: selp.b32 %r2, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
+; CHECK-NEXT: ret;
+ %a2 = add i32 %a, 1
+ %b = trunc nuw i32 %a2 to i8
+ %c = icmp eq i8 %b, 100
+ ret i1 %c
+}
+
+;;;
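+; Same checks as above, but comparing two truncated values against each
+; other instead of a truncated value against a constant.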
+
+define i1 @trunc_nsw_signed(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nsw_signed(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_signed_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_signed_param_1];
+; CHECK-NEXT: add.s32 %r4, %r3, 7;
+; CHECK-NEXT: setp.gt.s32 %p1, %r2, %r4;
+; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 1
+ %b2 = add i32 %a2, 7
+ %c1 = trunc nsw i32 %b1 to i8
+ %c2 = trunc nsw i32 %b2 to i8
+ %c = icmp sgt i8 %c1, %c2
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_signed(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nuw_signed(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<7>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nuw_signed_param_0];
+; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nuw_signed_param_1];
+; CHECK-NEXT: add.s16 %rs3, %rs1, 1;
+; CHECK-NEXT: cvt.s16.s8 %rs4, %rs3;
+; CHECK-NEXT: add.s16 %rs5, %rs2, 6;
+; CHECK-NEXT: cvt.s16.s8 %rs6, %rs5;
+; CHECK-NEXT: setp.lt.s16 %p1, %rs4, %rs6;
+; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 1
+ %b2 = add i32 %a2, 6
+ %c1 = trunc nuw i32 %b1 to i8
+ %c2 = trunc nuw i32 %b2 to i8
+ %c = icmp slt i8 %c1, %c2
+ ret i1 %c
+}
+
+define i1 @trunc_nsw_unsigned(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nsw_unsigned(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<7>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b8 %rs1, [trunc_nsw_unsigned_param_0];
+; CHECK-NEXT: ld.param.b8 %rs2, [trunc_nsw_unsigned_param_1];
+; CHECK-NEXT: add.s16 %rs3, %rs1, 1;
+; CHECK-NEXT: and.b16 %rs4, %rs3, 255;
+; CHECK-NEXT: add.s16 %rs5, %rs2, 4;
+; CHECK-NEXT: and.b16 %rs6, %rs5, 255;
+; CHECK-NEXT: setp.lt.u16 %p1, %rs4, %rs6;
+; CHECK-NEXT: selp.b32 %r1, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 1
+ %b2 = add i32 %a2, 4
+ %c1 = trunc nsw i32 %b1 to i8
+ %c2 = trunc nsw i32 %b2 to i8
+ %c = icmp ult i8 %c1, %c2
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_unsigned(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nuw_unsigned(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_unsigned_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_unsigned_param_1];
+; CHECK-NEXT: add.s32 %r4, %r3, 5;
+; CHECK-NEXT: setp.gt.u32 %p1, %r2, %r4;
+; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 1
+ %b2 = add i32 %a2, 5
+ %c1 = trunc nuw i32 %b1 to i8
+ %c2 = trunc nuw i32 %b2 to i8
+ %c = icmp ugt i8 %c1, %c2
+ ret i1 %c
+}
+
+
+define i1 @trunc_nsw_eq(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nsw_eq(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nsw_eq_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: ld.param.b32 %r3, [trunc_nsw_eq_param_1];
+; CHECK-NEXT: add.s32 %r4, %r3, 3;
+; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4;
+; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 1
+ %b2 = add i32 %a2, 3
+ %c1 = trunc nsw i32 %b1 to i8
+ %c2 = trunc nsw i32 %b2 to i8
+ %c = icmp eq i8 %c1, %c2
+ ret i1 %c
+}
+
+define i1 @trunc_nuw_eq(i32 %a1, i32 %a2) {
+; CHECK-LABEL: trunc_nuw_eq(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b32 %r<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [trunc_nuw_eq_param_0];
+; CHECK-NEXT: add.s32 %r2, %r1, 2;
+; CHECK-NEXT: ld.param.b32 %r3, [trunc_nuw_eq_param_1];
+; CHECK-NEXT: add.s32 %r4, %r3, 1;
+; CHECK-NEXT: setp.eq.b32 %p1, %r2, %r4;
+; CHECK-NEXT: selp.b32 %r5, -1, 0, %p1;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
+; CHECK-NEXT: ret;
+ %b1 = add i32 %a1, 2
+ %b2 = add i32 %a2, 1
+ %c1 = trunc nuw i32 %b1 to i8
+ %c2 = trunc nuw i32 %b2 to i8
+ %c = icmp eq i8 %c1, %c2
+ ret i1 %c
+}
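+
+; A note on the cases above (our summary, not asserted by the test): when
+; the trunc flag agrees with the compare (nsw with a signed predicate, nuw
+; with an unsigned one, or either flag with eq), the i8 compare is done
+; directly on the original 32-bit values (e.g. setp.gt.u32, setp.eq.b32
+; above). When they disagree, the operands are first narrowed and
+; re-extended through 16-bit registers (cvt.s16.s8 or and.b16 ... 255).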
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
index 9ffb4fd..258ddf6 100644
--- a/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-abi-mir.ll
@@ -37,9 +37,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8
; 32BIT: bb.0.entry:
; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6
; 32BIT-NEXT: {{ $}}
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6
; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
;
@@ -47,9 +47,9 @@ define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8
; 64BIT: bb.0.entry:
; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6
; 64BIT-NEXT: {{ $}}
- ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
@@ -96,9 +96,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3
; 32BIT: bb.0.entry:
; 32BIT-NEXT: liveins: $r3, $r4, $r5, $r6
; 32BIT-NEXT: {{ $}}
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r4
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r5
- ; 32BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, killed renamable $r6
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r4
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r5
+ ; 32BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, killed renamable $r6
; 32BIT-NEXT: renamable $r3 = EXTSB killed renamable $r3
; 32BIT-NEXT: BLR implicit $lr, implicit $rm, implicit $r3
;
@@ -106,9 +106,9 @@ define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3
; 64BIT: bb.0.entry:
; 64BIT-NEXT: liveins: $x3, $x4, $x5, $x6
; 64BIT-NEXT: {{ $}}
- ; 64BIT-NEXT: renamable $r3 = ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
- ; 64BIT-NEXT: renamable $r3 = ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 renamable $r3, renamable $r4, implicit killed $x4, implicit killed $x3
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r5, implicit killed $x5
+ ; 64BIT-NEXT: renamable $r3 = nsw ADD4 killed renamable $r3, renamable $r6, implicit killed $x6, implicit-def $x3
; 64BIT-NEXT: renamable $x3 = EXTSB8 killed renamable $x3
; 64BIT-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
entry:
diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
index 1863eaf..bfc7fbb 100644
--- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
@@ -1,5 +1,5 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
define ptr @nest_receiver(ptr nest %arg) nounwind {
ret ptr %arg
@@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind {
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
}
+; CHECK-LABEL: .nest_receiver:
+; CHECK: mr 3, 11
+; CHECK: blr
-; CHECK: LLVM ERROR: Nest arguments are unimplemented.
+; CHECK-LABEL: .nest_caller:
+; CHECK: mr 11, 3
+; CHECK: bl .nest_receiver
diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
index b71f6b5..19df220 100644
--- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
@@ -1,7 +1,7 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-
-; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX.
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \
+; RUN: FileCheck %s --check-prefix=32BIT
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \
+; RUN: FileCheck %s --check-prefix=64BIT
define void @create_trampoline(ptr %buffer, ptr %nval) nounwind {
entry:
@@ -12,3 +12,17 @@ entry:
declare i32 @nested(i32);
declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+
+; 32BIT: stw 4, 8(3)
+; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2)
+; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]])
+; 32BIT-DAG: stw [[SCRATCH1]], 0(3)
+; 32BIT-DAG: stw [[SCRATCH2]], 4(3)
+
+; 64BIT: std 4, 16(3)
+; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2)
+; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]])
+; 64BIT-DAG: std [[SCRATCH1]], 0(3)
+; 64BIT-DAG: std [[SCRATCH2]], 8(3)
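+
+; Our reading of the sequence above (illustrative, not normative): the
+; trampoline buffer at %buffer is filled with a copy of @nested's function
+; descriptor (entry point and TOC, loaded via the TOC entry at L..C0(2)),
+; and the nest pointer %nval is stored just past it, at offset 8 on
+; 32-bit and offset 16 on 64-bit.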
diff --git a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
index 59173e2..d8e66d6 100644
--- a/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
+++ b/llvm/test/CodeGen/PowerPC/check-zero-vector.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: < %s | FileCheck %s --check-prefix=POWERPC_64LE
@@ -7,240 +8,90 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \
; RUN: < %s | FileCheck %s --check-prefix=POWERPC_32
-define i32 @test_Greater_than(ptr %colauths, i32 signext %ncols) {
-; This testcase is manually reduced to isolate the critical code blocks.
-; It is designed to check for vector comparison specifically for zero vectors.
-; In the vector.body section, we are expecting a comparison instruction (vcmpequh),
-; merge instructions (vmrghh and vmrglh) which use exactly 2 vectors.
-; The output of the merge instruction is being used by xxland and finally
-; accumulated by vadduwm instruction.
-
+define i32 @test_Greater_than(ptr %colauths) {
+; This test case covers the special case of comparison against a zero vector.
+; Currently the generated code performs a comparison (vcmpequh) followed by a
+; negation (xxlnor); this pattern is expected to be optimized in a future patch.
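+; As a sketch of that improvement (assuming the ISA 3.0 vcmpne[bhw] family
+; implemented by pwr9), the compare-plus-negate pair
+;   vcmpequh 2, 2, 3
+;   xxlnor 34, 34, 34
+; could fold into a single not-equal compare:
+;   vcmpneh 2, 2, 3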
; POWERPC_64LE-LABEL: test_Greater_than:
-; POWERPC_64LE: .LBB0_6: # %vector.body
-; POWERPC_64LE-NEXT: #
-; POWERPC_64LE-NEXT: lxv [[R1:[0-9]+]], -64(4)
-; POWERPC_64LE-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_64LE-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_64LE-NEXT: vmrghh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_64LE-NEXT: vmrglh [[R2]], [[R2]], [[R2]]
-; POWERPC_64LE-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_64LE-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_64LE-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_64LE: .LBB0_10: # %vec.epilog.vector.body
-; POWERPC_64LE-NEXT: #
-; POWERPC_64LE-NEXT: lxv [[R8:[0-9]+]], 0(4)
-; POWERPC_64LE-NEXT: addi 4, 4, 16
-; POWERPC_64LE-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]]
-; POWERPC_64LE-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_64LE-NEXT: vmrglh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_64LE-NEXT: vmrghh [[R9]], [[R9]], [[R9]]
-; POWERPC_64LE-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_64LE-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_64LE-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_64LE-NEXT: vadduwm [[R3]], [[R3]], [[R11]]
-; POWERPC_64LE-NEXT: bdnz .LBB0_10
-; POWERPC_64LE: blr
+; POWERPC_64LE: # %bb.0: # %entry
+; POWERPC_64LE-NEXT: lfd 0, 0(3)
+; POWERPC_64LE-NEXT: xxlxor 35, 35, 35
+; POWERPC_64LE-NEXT: li 4, 0
+; POWERPC_64LE-NEXT: li 3, 4
+; POWERPC_64LE-NEXT: xxswapd 34, 0
+; POWERPC_64LE-NEXT: vcmpequh 2, 2, 3
+; POWERPC_64LE-NEXT: xxlnor 34, 34, 34
+; POWERPC_64LE-NEXT: vmrglh 3, 2, 2
+; POWERPC_64LE-NEXT: vextuwrx 4, 4, 2
+; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3
+; POWERPC_64LE-NEXT: clrlwi 4, 4, 31
+; POWERPC_64LE-NEXT: rlwimi 4, 3, 1, 30, 30
+; POWERPC_64LE-NEXT: mfvsrwz 3, 35
+; POWERPC_64LE-NEXT: rlwimi 4, 3, 2, 29, 29
+; POWERPC_64LE-NEXT: li 3, 12
+; POWERPC_64LE-NEXT: vextuwrx 3, 3, 3
+; POWERPC_64LE-NEXT: rlwimi 4, 3, 3, 28, 28
+; POWERPC_64LE-NEXT: stb 4, -1(1)
+; POWERPC_64LE-NEXT: lbz 3, -1(1)
+; POWERPC_64LE-NEXT: popcntd 3, 3
+; POWERPC_64LE-NEXT: blr
;
; POWERPC_64-LABEL: test_Greater_than:
-; POWERPC_64: L..BB0_6: # %vector.body
-; POWERPC_64-NEXT: #
-; POWERPC_64-NEXT: lxv [[R1:[0-9]+]], -64(4)
-; POWERPC_64-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_64-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_64-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_64-NEXT: vmrghh [[R2]], [[R2]], [[R2]]
-; POWERPC_64-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_64-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_64-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_64: L..BB0_10: # %vec.epilog.vector.body
-; POWERPC_64-NEXT: #
-; POWERPC_64-NEXT: lxv [[R8:[0-9]+]], 0(4)
-; POWERPC_64-NEXT: addi 4, 4, 16
-; POWERPC_64-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R10:[0-9]+]]
-; POWERPC_64-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_64-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_64-NEXT: vmrglh [[R9]], [[R9]], [[R9]]
-; POWERPC_64-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_64-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_64-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_64-NEXT: vadduwm [[R3]], [[R3]], [[R11]]
-; POWERPC_64-NEXT: bdnz L..BB0_10
-; POWERPC_64: blr
+; POWERPC_64: # %bb.0: # %entry
+; POWERPC_64-NEXT: lxsd 2, 0(3)
+; POWERPC_64-NEXT: xxlxor 35, 35, 35
+; POWERPC_64-NEXT: li 4, 12
+; POWERPC_64-NEXT: li 3, 8
+; POWERPC_64-NEXT: vcmpequh 2, 2, 3
+; POWERPC_64-NEXT: xxlnor 34, 34, 34
+; POWERPC_64-NEXT: vmrghh 2, 2, 2
+; POWERPC_64-NEXT: vextuwlx 4, 4, 2
+; POWERPC_64-NEXT: vextuwlx 3, 3, 2
+; POWERPC_64-NEXT: clrlwi 4, 4, 31
+; POWERPC_64-NEXT: rlwimi 4, 3, 1, 30, 30
+; POWERPC_64-NEXT: mfvsrwz 3, 34
+; POWERPC_64-NEXT: rlwimi 4, 3, 2, 29, 29
+; POWERPC_64-NEXT: li 3, 0
+; POWERPC_64-NEXT: vextuwlx 3, 3, 2
+; POWERPC_64-NEXT: rlwimi 4, 3, 3, 28, 28
+; POWERPC_64-NEXT: stb 4, -1(1)
+; POWERPC_64-NEXT: lbz 3, -1(1)
+; POWERPC_64-NEXT: popcntd 3, 3
+; POWERPC_64-NEXT: blr
;
; POWERPC_32-LABEL: test_Greater_than:
-; POWERPC_32: L..BB0_7: # %vector.body
-; POWERPC_32-NEXT: #
-; POWERPC_32-NEXT: lxv [[R1:[0-9]+]], 0(10)
-; POWERPC_32-NEXT: addic [[R13:[0-9]+]], [[R13]], 64
-; POWERPC_32-NEXT: addze [[R14:[0-9]+]], [[R14]]
-; POWERPC_32-NEXT: xor [[R15:[0-9]+]], [[R13]], [[R16:[0-9]+]]
-; POWERPC_32-NEXT: or. [[R15]], [[R15]], [[R14]]
-; POWERPC_32-NEXT: vcmpequh [[R2:[0-9]+]], [[R2]], [[R3:[0-9]+]]
-; POWERPC_32-NEXT: xxlnor [[R1]], [[R1]], [[R1]]
-; POWERPC_32-NEXT: vmrglh [[R4:[0-9]+]], [[R2]], [[R2]]
-; POWERPC_32-NEXT: vmrghh [[R2]], [[R2]], [[R2]]
-; POWERPC_32-NEXT: xxland [[R5:[0-9]+]], [[R5]], [[R6:[0-9]+]]
-; POWERPC_32-NEXT: xxland [[R1]], [[R1]], [[R6]]
-; POWERPC_32-NEXT: vadduwm [[R7:[0-9]+]], [[R7]], [[R4]]
-; POWERPC_32: L..BB0_11: # %vec.epilog.vector.body
-; POWERPC_32-NEXT: #
-; POWERPC_32-NEXT: slwi [[R14]], [[R13]], 1
-; POWERPC_32-NEXT: addic [[R13]], [[R13]], 8
-; POWERPC_32-NEXT: addze [[R17:[0-9]+]], [[R17]]
-; POWERPC_32-NEXT: lxvx [[R8:[0-9]+]], [[R18:[0-9]+]], [[R14]]
-; POWERPC_32-NEXT: xor [[R14]], [[R13]], [[R16]]
-; POWERPC_32-NEXT: or. [[R14]], [[R14]], [[R17]]
-; POWERPC_32-NEXT: vcmpequh [[R9:[0-9]+]], [[R9]], [[R3]]
-; POWERPC_32-NEXT: xxlnor [[R8]], [[R8]], [[R8]]
-; POWERPC_32-NEXT: vmrghh [[R11:[0-9]+]], [[R9]], [[R9]]
-; POWERPC_32-NEXT: vmrglh [[R9]], [[R9]], [[R9]]
-; POWERPC_32-NEXT: xxland [[R12:[0-9]+]], [[R12]], [[R6]]
-; POWERPC_32-NEXT: xxland [[R8]], [[R8]], [[R6]]
-; POWERPC_32-NEXT: vadduwm [[R7]], [[R7]], [[R9]]
-; POWERPC_32-NEXT: vadduwm [[R19:[0-9]+]], [[R19]], [[R11]]
-; POWERPC_32-NEXT: bne 0, L..BB0_11
-; POWERPC_32: blr
- entry:
- %cmp5 = icmp sgt i32 %ncols, 0
- br i1 %cmp5, label %iter.check, label %for.cond.cleanup
-
-iter.check: ; preds = %entry
- %wide.trip.count = zext nneg i32 %ncols to i64
- %min.iters.check = icmp ult i32 %ncols, 8
- br i1 %min.iters.check, label %for.body.preheader, label %vector.main.loop.iter.check
-
-for.body.preheader: ; preds = %vec.epilog.iter.check, %vec.epilog.middle.block, %iter.check
- %indvars.iv.ph = phi i64 [ 0, %iter.check ], [ %n.vec, %vec.epilog.iter.check ], [ %n.vec31, %vec.epilog.middle.block ]
- %num_cols_needed.06.ph = phi i32 [ 0, %iter.check ], [ %33, %vec.epilog.iter.check ], [ %40, %vec.epilog.middle.block ]
- br label %for.body
-
-vector.main.loop.iter.check: ; preds = %iter.check
- %min.iters.check9 = icmp ult i32 %ncols, 64
- br i1 %min.iters.check9, label %vec.epilog.ph, label %vector.ph
-
-vector.ph: ; preds = %vector.main.loop.iter.check
- %n.vec = and i64 %wide.trip.count, 2147483584
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %vec.phi = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %24, %vector.body ]
- %vec.phi10 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %25, %vector.body ]
- %vec.phi11 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %26, %vector.body ]
- %vec.phi12 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %27, %vector.body ]
- %vec.phi13 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %28, %vector.body ]
- %vec.phi14 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %29, %vector.body ]
- %vec.phi15 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %30, %vector.body ]
- %vec.phi16 = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ %31, %vector.body ]
- %0 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index
- %1 = getelementptr inbounds nuw i8, ptr %0, i64 16
- %2 = getelementptr inbounds nuw i8, ptr %0, i64 32
- %3 = getelementptr inbounds nuw i8, ptr %0, i64 48
- %4 = getelementptr inbounds nuw i8, ptr %0, i64 64
- %5 = getelementptr inbounds nuw i8, ptr %0, i64 80
- %6 = getelementptr inbounds nuw i8, ptr %0, i64 96
- %7 = getelementptr inbounds nuw i8, ptr %0, i64 112
- %wide.load = load <8 x i16>, ptr %0, align 2, !tbaa !5
- %wide.load17 = load <8 x i16>, ptr %1, align 2, !tbaa !5
- %wide.load18 = load <8 x i16>, ptr %2, align 2, !tbaa !5
- %wide.load19 = load <8 x i16>, ptr %3, align 2, !tbaa !5
- %wide.load20 = load <8 x i16>, ptr %4, align 2, !tbaa !5
- %wide.load21 = load <8 x i16>, ptr %5, align 2, !tbaa !5
- %wide.load22 = load <8 x i16>, ptr %6, align 2, !tbaa !5
- %wide.load23 = load <8 x i16>, ptr %7, align 2, !tbaa !5
- %8 = icmp ne <8 x i16> %wide.load, zeroinitializer
- %9 = icmp ne <8 x i16> %wide.load17, zeroinitializer
- %10 = icmp ne <8 x i16> %wide.load18, zeroinitializer
- %11 = icmp ne <8 x i16> %wide.load19, zeroinitializer
- %12 = icmp ne <8 x i16> %wide.load20, zeroinitializer
- %13 = icmp ne <8 x i16> %wide.load21, zeroinitializer
- %14 = icmp ne <8 x i16> %wide.load22, zeroinitializer
- %15 = icmp ne <8 x i16> %wide.load23, zeroinitializer
- %16 = zext <8 x i1> %8 to <8 x i32>
- %17 = zext <8 x i1> %9 to <8 x i32>
- %18 = zext <8 x i1> %10 to <8 x i32>
- %19 = zext <8 x i1> %11 to <8 x i32>
- %20 = zext <8 x i1> %12 to <8 x i32>
- %21 = zext <8 x i1> %13 to <8 x i32>
- %22 = zext <8 x i1> %14 to <8 x i32>
- %23 = zext <8 x i1> %15 to <8 x i32>
- %24 = add <8 x i32> %vec.phi, %16
- %25 = add <8 x i32> %vec.phi10, %17
- %26 = add <8 x i32> %vec.phi11, %18
- %27 = add <8 x i32> %vec.phi12, %19
- %28 = add <8 x i32> %vec.phi13, %20
- %29 = add <8 x i32> %vec.phi14, %21
- %30 = add <8 x i32> %vec.phi15, %22
- %31 = add <8 x i32> %vec.phi16, %23
- %index.next = add nuw i64 %index, 64
- %32 = icmp eq i64 %index.next, %n.vec
- br i1 %32, label %middle.block, label %vector.body, !llvm.loop !9
-
-middle.block: ; preds = %vector.body
- %bin.rdx = add <8 x i32> %25, %24
- %bin.rdx24 = add <8 x i32> %26, %bin.rdx
- %bin.rdx25 = add <8 x i32> %27, %bin.rdx24
- %bin.rdx26 = add <8 x i32> %28, %bin.rdx25
- %bin.rdx27 = add <8 x i32> %29, %bin.rdx26
- %bin.rdx28 = add <8 x i32> %30, %bin.rdx27
- %bin.rdx29 = add <8 x i32> %31, %bin.rdx28
- %33 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %bin.rdx29)
- %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
- br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check
-
-vec.epilog.iter.check: ; preds = %middle.block
- %n.vec.remaining = and i64 %wide.trip.count, 56
- %min.epilog.iters.check = icmp eq i64 %n.vec.remaining, 0
- br i1 %min.epilog.iters.check, label %for.body.preheader, label %vec.epilog.ph
-
-vec.epilog.ph: ; preds = %vec.epilog.iter.check, %vector.main.loop.iter.check
- %vec.epilog.resume.val = phi i64 [ %n.vec, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ]
- %bc.merge.rdx = phi i32 [ %33, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ]
- %n.vec31 = and i64 %wide.trip.count, 2147483640
- %34 = insertelement <8 x i32> <i32 poison, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %bc.merge.rdx, i64 0
- br label %vec.epilog.vector.body
-
-vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph
- %index32 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next35, %vec.epilog.vector.body ]
- %vec.phi33 = phi <8 x i32> [ %34, %vec.epilog.ph ], [ %38, %vec.epilog.vector.body ]
- %35 = getelementptr inbounds nuw i16, ptr %colauths, i64 %index32
- %wide.load34 = load <8 x i16>, ptr %35, align 2, !tbaa !5
- %36 = icmp ne <8 x i16> %wide.load34, zeroinitializer
- %37 = zext <8 x i1> %36 to <8 x i32>
- %38 = add <8 x i32> %vec.phi33, %37
- %index.next35 = add nuw i64 %index32, 8
- %39 = icmp eq i64 %index.next35, %n.vec31
- br i1 %39, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !13
-
-vec.epilog.middle.block: ; preds = %vec.epilog.vector.body
- %40 = tail call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %38)
- %cmp.n36 = icmp eq i64 %n.vec31, %wide.trip.count
- br i1 %cmp.n36, label %for.cond.cleanup, label %for.body.preheader
-
-for.cond.cleanup: ; preds = %for.body, %middle.block, %vec.epilog.middle.block, %entry
- %num_cols_needed.0.lcssa = phi i32 [ 0, %entry ], [ %33, %middle.block ], [ %40, %vec.epilog.middle.block ], [ %spec.select, %for.body ]
- ret i32 %num_cols_needed.0.lcssa
-
-for.body: ; preds = %for.body.preheader, %for.body
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
- %num_cols_needed.06 = phi i32 [ %spec.select, %for.body ], [ %num_cols_needed.06.ph, %for.body.preheader ]
- %arrayidx = getelementptr inbounds nuw i16, ptr %colauths, i64 %indvars.iv
- %41 = load i16, ptr %arrayidx, align 2, !tbaa !5
- %tobool.not = icmp ne i16 %41, 0
- %inc = zext i1 %tobool.not to i32
- %spec.select = add nuw nsw i32 %num_cols_needed.06, %inc
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
- br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !14
+; POWERPC_32: # %bb.0: # %entry
+; POWERPC_32-NEXT: li 4, 4
+; POWERPC_32-NEXT: lxvwsx 1, 0, 3
+; POWERPC_32-NEXT: xxlxor 35, 35, 35
+; POWERPC_32-NEXT: lxvwsx 0, 3, 4
+; POWERPC_32-NEXT: xxmrghw 34, 1, 0
+; POWERPC_32-NEXT: vcmpequh 2, 2, 3
+; POWERPC_32-NEXT: xxlnor 34, 34, 34
+; POWERPC_32-NEXT: vmrghh 2, 2, 2
+; POWERPC_32-NEXT: stxv 34, -32(1)
+; POWERPC_32-NEXT: lwz 3, -20(1)
+; POWERPC_32-NEXT: lwz 4, -24(1)
+; POWERPC_32-NEXT: clrlwi 3, 3, 31
+; POWERPC_32-NEXT: rlwimi 3, 4, 1, 30, 30
+; POWERPC_32-NEXT: lwz 4, -28(1)
+; POWERPC_32-NEXT: rlwimi 3, 4, 2, 29, 29
+; POWERPC_32-NEXT: lwz 4, -32(1)
+; POWERPC_32-NEXT: rlwimi 3, 4, 3, 28, 28
+; POWERPC_32-NEXT: popcntw 3, 3
+; POWERPC_32-NEXT: blr
+entry:
+ %0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5
+ %1 = icmp ne <4 x i16> %0, zeroinitializer
+ %2 = bitcast <4 x i1> %1 to i4
+ %3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2)
+ %4 = zext nneg i4 %3 to i32
+ ret i32 %4
}
+declare i4 @llvm.ctpop.i4(i4)
+
!5 = !{!6, !6, i64 0}
!6 = !{!"short", !7, i64 0}
!7 = !{!"omnipotent char", !8, i64 0}
!8 = !{!"Simple C/C++ TBAA"}
-!9 = distinct !{!9, !10, !11, !12}
-!10 = !{!"llvm.loop.mustprogress"}
-!11 = !{!"llvm.loop.isvectorized", i32 1}
-!12 = !{!"llvm.loop.unroll.runtime.disable"}
-!13 = distinct !{!13, !10, !11, !12}
-!14 = distinct !{!14, !10, !12, !11}
diff --git a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
index 232014d..a9503f7 100644
--- a/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
+++ b/llvm/test/CodeGen/PowerPC/mtvsrbmi.ll
@@ -2,22 +2,87 @@
-; Verify whether the generated assembly for the following function includes the mtvsrbmi instruction.
+; Verify whether the generated assembly for the following functions includes the mtvsrbmi instruction.
; vector unsigned char v00FF()
; {
-; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
-; return x;
+; vector unsigned char x = { 0xFF, 0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 };
+; return x;
+; }
+; vector unsigned short short00FF()
+; {
+; vector unsigned short x = { 0xFF, 0,0,0, 0,0,0,0};
+; return x;
+; }
+; vector unsigned int int00FF()
+; {
+; vector unsigned int x = { 0xFF, 0,0,0};
+; return x;
+; }
+; vector unsigned long long longlong00FF()
+; {
+; vector unsigned long long x = { 0xFF, 0};
+; return x;
; }
; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc-ibm-aix -mcpu=pwr10 -verify-machineinstrs \
-; RUN: | FileCheck %s --check-prefix=CHECK
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-BE
+
+; RUN: llc < %s -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-gnu-linux -mcpu=pwr10 -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+
+; CHECK-NOT: .byte 255
+; CHECK-NOT: .byte 0
define dso_local noundef range(i8 -1, 1) <16 x i8> @_Z5v00FFv() {
-; CHECK-NOT: L..CPI0_0:
-; CHECK-NOT: .byte 255 # 0xff
-; CHECK-NOT: .byte 0 # 0x0
-
-; CHECK-LABEL: _Z5v00FFv:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mtvsrbmi v2, 1
-; CHECK-NEXT: blr
+; CHECK-BE-LABEL: _Z5v00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 32768
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z5v00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+
entry:
ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
}
+
+define dso_local noundef range(i16 0, 256) <8 x i16> @_Z9short00FFv() {
+; CHECK-BE-LABEL: _Z9short00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 16384
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z9short00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <8 x i16> <i16 255, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
+}
+
+define dso_local noundef range(i32 0, 256) <4 x i32> @_Z7int00FFv() {
+; CHECK-BE-LABEL: _Z7int00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 4096
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z7int00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <4 x i32> <i32 255, i32 0, i32 0, i32 0>
+}
+
+define dso_local noundef range(i64 0, 256) <2 x i64> @_Z12longlong00FFv() {
+; CHECK-BE-LABEL: _Z12longlong00FFv:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: mtvsrbmi v2, 256
+; CHECK-BE-NEXT: blr
+;
+; CHECK-LE-LABEL: _Z12longlong00FFv:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: mtvsrbmi v2, 1
+; CHECK-LE-NEXT: blr
+entry:
+ ret <2 x i64> <i64 255, i64 0>
+}
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index b94665b..fb53921 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -6,13 +6,21 @@
; CHECK-NEXT: 32bit - Implements RV32.
; CHECK-NEXT: 64bit - Implements RV64.
; CHECK-NEXT: a - 'A' (Atomic Instructions).
+; CHECK-NEXT: add-load-fusion - Enable ADD(.UW) + load macrofusion.
+; CHECK-NEXT: addi-load-fusion - Enable ADDI + load macrofusion.
; CHECK-NEXT: andes45 - Andes 45-Series processors.
; CHECK-NEXT: auipc-addi-fusion - Enable AUIPC+ADDI macrofusion.
+; CHECK-NEXT: auipc-load-fusion - Enable AUIPC + load macrofusion.
; CHECK-NEXT: b - 'B' (the collection of the Zba, Zbb, Zbs extensions).
+; CHECK-NEXT: bfext-fusion - Enable SLLI+SRLI (bitfield extract) macrofusion.
; CHECK-NEXT: c - 'C' (Compressed Instructions).
; CHECK-NEXT: conditional-cmv-fusion - Enable branch+c.mv fusion.
; CHECK-NEXT: d - 'D' (Double-Precision Floating-Point).
; CHECK-NEXT: disable-latency-sched-heuristic - Disable latency scheduling heuristic.
+; CHECK-NEXT: disable-misched-load-clustering - Disable load clustering in the machine scheduler.
+; CHECK-NEXT: disable-misched-store-clustering - Disable store clustering in the machine scheduler.
+; CHECK-NEXT: disable-postmisched-load-clustering - Disable PostRA load clustering in the machine scheduler.
+; CHECK-NEXT: disable-postmisched-store-clustering - Disable PostRA store clustering in the machine scheduler.
; CHECK-NEXT: dlen-factor-2 - Vector unit DLEN(data path width) is half of VLEN.
; CHECK-NEXT: e - 'E' (Embedded Instruction Set with 16 GPRs).
; CHECK-NEXT: exact-asm - Enable Exact Assembly (Disables Compression and Relaxation).
@@ -58,6 +66,7 @@
; CHECK-NEXT: ld-add-fusion - Enable LD+ADD macrofusion.
; CHECK-NEXT: log-vrgather - Has vrgather.vv with LMUL*log2(LMUL) latency
; CHECK-NEXT: lui-addi-fusion - Enable LUI+ADDI macro fusion.
+; CHECK-NEXT: lui-load-fusion - Enable LUI + load macrofusion.
; CHECK-NEXT: m - 'M' (Integer Multiplication and Division).
; CHECK-NEXT: mips-p8700 - MIPS p8700 processor.
; CHECK-NEXT: no-default-unroll - Disable default unroll preference..
@@ -130,6 +139,7 @@
; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp).
; CHECK-NEXT: shvstvala - 'Shvstvala' (vstval provides all needed values).
; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode).
+; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion.
; CHECK-NEXT: sifive7 - SiFive 7-Series processors.
; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level).
; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level).
diff --git a/llvm/test/CodeGen/RISCV/macro-fusions.mir b/llvm/test/CodeGen/RISCV/macro-fusions.mir
index 1346414..ae5b52d 100644
--- a/llvm/test/CodeGen/RISCV/macro-fusions.mir
+++ b/llvm/test/CodeGen/RISCV/macro-fusions.mir
@@ -2,7 +2,12 @@
# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \
# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \
# RUN: -mattr=+lui-addi-fusion,+auipc-addi-fusion,+zexth-fusion,+zextw-fusion,+shifted-zextw-fusion,+ld-add-fusion \
+# RUN: -mattr=+add-load-fusion,+auipc-load-fusion,+lui-load-fusion,+addi-load-fusion \
+# RUN: -mattr=+zba,+shxadd-load-fusion \
# RUN: | FileCheck %s
+# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \
+# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \
+# RUN: -mattr=+zba,+bfext-fusion | FileCheck --check-prefixes=CHECK-BFEXT %s
# CHECK: lui_addi:%bb.0
# CHECK: Macro fuse: {{.*}}LUI - ADDI
@@ -174,3 +179,1374 @@ body: |
$x11 = COPY %5
PseudoRET
...
+
+# CHECK: add_lb
+# CHECK: Macro fuse: {{.*}}ADD - LB
+---
+name: add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lh
+# CHECK: Macro fuse: {{.*}}ADD - LH
+---
+name: add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lw
+# CHECK: Macro fuse: {{.*}}ADD - LW
+---
+name: add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lbu
+# CHECK: Macro fuse: {{.*}}ADD - LBU
+---
+name: add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lhu
+# CHECK: Macro fuse: {{.*}}ADD - LHU
+---
+name: add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lwu
+# CHECK: Macro fuse: {{.*}}ADD - LWU
+---
+name: add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: auipc_lb
+# CHECK: Macro fuse: {{.*}}AUIPC - LB
+---
+name: auipc_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LB %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lh
+# CHECK: Macro fuse: {{.*}}AUIPC - LH
+---
+name: auipc_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LH %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lw
+# CHECK: Macro fuse: {{.*}}AUIPC - LW
+---
+name: auipc_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LW %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_ld
+# CHECK: Macro fuse: {{.*}}AUIPC - LD
+---
+name: auipc_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LD %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lbu
+# CHECK: Macro fuse: {{.*}}AUIPC - LBU
+---
+name: auipc_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LBU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lhu
+# CHECK: Macro fuse: {{.*}}AUIPC - LHU
+---
+name: auipc_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LHU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lwu
+# CHECK: Macro fuse: {{.*}}AUIPC - LWU
+---
+name: auipc_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LWU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lb
+# CHECK: Macro fuse: {{.*}}LUI - LB
+---
+name: lui_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LB %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lh
+# CHECK: Macro fuse: {{.*}}LUI - LH
+---
+name: lui_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LH %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lw
+# CHECK: Macro fuse: {{.*}}LUI - LW
+---
+name: lui_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LW %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_ld
+# CHECK: Macro fuse: {{.*}}LUI - LD
+---
+name: lui_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LD %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lbu
+# CHECK: Macro fuse: {{.*}}LUI - LBU
+---
+name: lui_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LBU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lhu
+# CHECK: Macro fuse: {{.*}}LUI - LHU
+---
+name: lui_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LHU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lwu
+# CHECK: Macro fuse: {{.*}}LUI - LWU
+---
+name: lui_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LWU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK-BFEXT: bitfield_extract
+# CHECK-BFEXT: Macro fuse: {{.*}}SLLI - SRLI
+---
+name: bitfield_extract
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = SLLI %1, 31
+ %3:gpr = XORI %1, 3
+ %4:gpr = SRLI %2, 48
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
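+
+# Why SLLI+SRLI qualifies as a bitfield extract (our interpretation, for
+# context): on RV64, (x << 31) >> 48 isolates the 16-bit field at bits
+# [32:17] of x as a zero-extended value, so the pair behaves like a single
+# extract instruction and is profitable to fuse.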
+
+# CHECK: addi_lb
+# CHECK: Macro fuse: {{.*}}ADDI - LB
+---
+name: addi_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lh
+# CHECK: Macro fuse: {{.*}}ADDI - LH
+---
+name: addi_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lw
+# CHECK: Macro fuse: {{.*}}ADDI - LW
+---
+name: addi_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_ld
+# CHECK: Macro fuse: {{.*}}ADDI - LD
+---
+name: addi_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lbu
+# CHECK: Macro fuse: {{.*}}ADDI - LBU
+---
+name: addi_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lhu
+# CHECK: Macro fuse: {{.*}}ADDI - LHU
+---
+name: addi_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lwu
+# CHECK: Macro fuse: {{.*}}ADDI - LWU
+---
+name: addi_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lb
+# CHECK: Macro fuse: {{.*}}ADD_UW - LB
+---
+name: adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lh
+# CHECK: Macro fuse: {{.*}}ADD_UW - LH
+---
+name: adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lw
+# CHECK: Macro fuse: {{.*}}ADD_UW - LW
+---
+name: adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_ld
+# CHECK: Macro fuse: {{.*}}ADD_UW - LD
+---
+name: adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lbu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LBU
+---
+name: adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lhu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LHU
+---
+name: adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lwu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LWU
+---
+name: adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lb
+# CHECK: Macro fuse: {{.*}}SH1ADD - LB
+---
+name: sh1add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lb
+# CHECK: Macro fuse: {{.*}}SH2ADD - LB
+---
+name: sh2add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lb
+# CHECK: Macro fuse: {{.*}}SH3ADD - LB
+---
+name: sh3add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lh
+# CHECK: Macro fuse: {{.*}}SH1ADD - LH
+---
+name: sh1add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lh
+# CHECK: Macro fuse: {{.*}}SH2ADD - LH
+---
+name: sh2add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lh
+# CHECK: Macro fuse: {{.*}}SH3ADD - LH
+---
+name: sh3add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lw
+# CHECK: Macro fuse: {{.*}}SH1ADD - LW
+---
+name: sh1add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lw
+# CHECK: Macro fuse: {{.*}}SH2ADD - LW
+---
+name: sh2add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lw
+# CHECK: Macro fuse: {{.*}}SH3ADD - LW
+---
+name: sh3add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_ld
+# CHECK: Macro fuse: {{.*}}SH1ADD - LD
+---
+name: sh1add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_ld
+# CHECK: Macro fuse: {{.*}}SH2ADD - LD
+---
+name: sh2add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_ld
+# CHECK: Macro fuse: {{.*}}SH3ADD - LD
+---
+name: sh3add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lbu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LBU
+---
+name: sh1add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lbu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LBU
+---
+name: sh2add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lbu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LBU
+---
+name: sh3add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lhu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LHU
+---
+name: sh1add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lhu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LHU
+---
+name: sh2add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lhu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LHU
+---
+name: sh3add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lwu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LWU
+---
+name: sh1add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lwu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LWU
+---
+name: sh2add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lwu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LWU
+---
+name: sh3add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lb
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LB
+---
+name: sh1adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lb
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LB
+---
+name: sh2adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lb
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LB
+---
+name: sh3adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lh
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LH
+---
+name: sh1adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lh
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LH
+---
+name: sh2adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lh
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LH
+---
+name: sh3adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lw
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LW
+---
+name: sh1adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lw
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LW
+---
+name: sh2adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lw
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LW
+---
+name: sh3adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_ld
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LD
+---
+name: sh1adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_ld
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LD
+---
+name: sh2adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_ld
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LD
+---
+name: sh3adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LBU
+---
+name: sh1adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LBU
+---
+name: sh2adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LBU
+---
+name: sh3adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LHU
+---
+name: sh1adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LHU
+---
+name: sh2adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LHU
+---
+name: sh3adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LWU
+---
+name: sh1adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LWU
+---
+name: sh2adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU
+---
+name: sh3adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
index 160f0ae..abdc1ba 100644
--- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
+++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
@@ -1,17 +1,42 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=riscv32 -verify-misched -riscv-misched-load-store-clustering=false \
+;
+; Disable all misched clustering
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=NOCLUSTER %s
-; RUN: llc -mtriple=riscv64 -verify-misched -riscv-misched-load-store-clustering=false \
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+;
+; ST misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+;
+; LD misched clustering only
; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=LDCLUSTER %s
; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=LDCLUSTER %s
-
+;
+; Default misched cluster settings (i.e. both LD and ST clustering)
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
define i32 @load_clustering_1(ptr nocapture %p) {
; NOCLUSTER: ********** MI Scheduling **********
@@ -22,6 +47,14 @@ define i32 @load_clustering_1(ptr nocapture %p) {
; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
;
+; STCLUSTER: ********** MI Scheduling **********
+; STCLUSTER-LABEL: load_clustering_1:%bb.0
+; STCLUSTER: *** Final schedule for %bb.0 ***
+; STCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; STCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; STCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; STCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+;
; LDCLUSTER: ********** MI Scheduling **********
; LDCLUSTER-LABEL: load_clustering_1:%bb.0
; LDCLUSTER: *** Final schedule for %bb.0 ***
@@ -29,6 +62,14 @@ define i32 @load_clustering_1(ptr nocapture %p) {
; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+;
+; DEFAULTCLUSTER: ********** MI Scheduling **********
+; DEFAULTCLUSTER-LABEL: load_clustering_1:%bb.0
+; DEFAULTCLUSTER: *** Final schedule for %bb.0 ***
+; DEFAULTCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; DEFAULTCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; DEFAULTCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; DEFAULTCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
entry:
%arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
%val0 = load i32, ptr %arrayidx0
diff --git a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
index 21398d3..01960f9 100644
--- a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
+++ b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
@@ -1,10 +1,12 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -verify-misched -enable-post-misched=false \
-# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \
+# RUN: -mattr=+disable-postmisched-load-clustering \
+# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \
# RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \
# RUN: | FileCheck -check-prefix=NOPOSTMISCHED %s
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \
-# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \
+# RUN: -mattr=+disable-postmisched-load-clustering \
+# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \
# RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \
# RUN: | FileCheck -check-prefix=NOCLUSTER %s
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \
diff --git a/llvm/test/CodeGen/RISCV/misched-store-clustering.ll b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll
new file mode 100644
index 0000000..02e853d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll
@@ -0,0 +1,83 @@
+; REQUIRES: asserts
+;
+; Disable all misched clustering
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+;
+; ST misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+;
+; LD misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=LDCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=LDCLUSTER %s
+;
+; Default misched cluster settings (i.e. both LD and ST clustering)
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+
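+; This file mirrors misched-load-clustering.ll for stores: the four SW
+; instructions below are emitted at offsets 12, 8, 4 and 16, and only the
+; runs with store clustering enabled (STCLUSTER and DEFAULTCLUSTER) should
+; reorder them into ascending offset order.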
+define i32 @store_clustering_1(ptr nocapture %p, i32 %v) {
+; NOCLUSTER: ********** MI Scheduling **********
+; NOCLUSTER-LABEL: store_clustering_1:%bb.0
+; NOCLUSTER: *** Final schedule for %bb.0 ***
+; NOCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; NOCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; NOCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; NOCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; STCLUSTER: ********** MI Scheduling **********
+; STCLUSTER-LABEL: store_clustering_1:%bb.0
+; STCLUSTER: *** Final schedule for %bb.0 ***
+; STCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; STCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; STCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; STCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; LDCLUSTER: ********** MI Scheduling **********
+; LDCLUSTER-LABEL: store_clustering_1:%bb.0
+; LDCLUSTER: *** Final schedule for %bb.0 ***
+; LDCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; LDCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; LDCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; LDCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; DEFAULTCLUSTER: ********** MI Scheduling **********
+; DEFAULTCLUSTER-LABEL: store_clustering_1:%bb.0
+; DEFAULTCLUSTER: *** Final schedule for %bb.0 ***
+; DEFAULTCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; DEFAULTCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; DEFAULTCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; DEFAULTCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+entry:
+ %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
+ store i32 %v, ptr %arrayidx0
+ %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2
+ store i32 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1
+ store i32 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4
+ store i32 %v, ptr %arrayidx3
+ ret i32 %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
index 4aa6dd4..42d326e 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -319,3 +319,142 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind {
%1 = zext i16 %a to i64
ret i64 %1
}
+
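+; The tests below check that byte-packing patterns are matched to the Zbkb
+; instructions: packh packs the low bytes of its two sources into a
+; zero-extended halfword, and pack concatenates the low XLEN/2 bits of its
+; two sources (rs1 in the low half, rs2 in the high half).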
+define i32 @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3) nounwind {
+; RV32I-LABEL: pack_lo_packh_hi_packh:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 8
+; RV32I-NEXT: slli a2, a2, 16
+; RV32I-NEXT: slli a3, a3, 24
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_packh_hi_packh:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a1
+; RV32ZBKB-NEXT: packh a1, a2, a3
+; RV32ZBKB-NEXT: pack a0, a0, a1
+; RV32ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ ret i32 %j
+}
+
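+; Same pattern as above, but the i8 arguments are not zero extended on entry,
+; so the RV32I lowering needs explicit zext.b instructions while packh can
+; still consume the raw bytes.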
+define i32 @pack_lo_packh_hi_packh_2(i8 %0, i8 %1, i8 %2, i8 %3) nounwind {
+; RV32I-LABEL: pack_lo_packh_hi_packh_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a2, a2
+; RV32I-NEXT: slli a3, a3, 24
+; RV32I-NEXT: slli a1, a1, 8
+; RV32I-NEXT: slli a2, a2, 16
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_packh_hi_packh_2:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a1
+; RV32ZBKB-NEXT: packh a1, a2, a3
+; RV32ZBKB-NEXT: pack a0, a0, a1
+; RV32ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ ret i32 %j
+}
+
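+; The low half is an already zero-extended i16 and only the high half is
+; assembled from bytes, so a single packh feeding pack should suffice.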
+define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) nounwind {
+; RV32I-LABEL: pack_lo_zext_hi_packh:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: slli a2, a2, 24
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_zext_hi_packh:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a1, a1, a2
+; RV32ZBKB-NEXT: pack a0, a0, a1
+; RV32ZBKB-NEXT: ret
+ %a = zext i16 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ ret i32 %g
+}
+
+; Negative test: %a isn't known to be zero extended, so we can't use pack
+; for the outer or, but we can still use packh for the high half.
+define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwind {
+; RV32I-LABEL: pack_lo_noext_hi_packh:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: slli a2, a2, 24
+; RV32I-NEXT: or a1, a2, a1
+; RV32I-NEXT: or a0, a1, a0
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_noext_hi_packh:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a1, a1, a2
+; RV32ZBKB-NEXT: slli a1, a1, 16
+; RV32ZBKB-NEXT: or a0, a1, a0
+; RV32ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ ret i32 %g
+}
+
+; Make sure we can match packh+slli even when the input bytes are not zero
+; extended.
+define i32 @pack_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2) nounwind {
+; RV32I-LABEL: pack_lo_noext_hi_packh_nozeroext:
+; RV32I: # %bb.0:
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: slli a2, a2, 24
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: or a0, a2, a0
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_noext_hi_packh_nozeroext:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a1, a1, a2
+; RV32ZBKB-NEXT: slli a1, a1, 16
+; RV32ZBKB-NEXT: or a0, a1, a0
+; RV32ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ ret i32 %g
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 818ea72..f2c41db 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -392,3 +392,217 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind {
%1 = zext i16 %a to i64
ret i64 %1
}
+
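+; RV64 variants of the packh tests. For i32 results the two halfwords are
+; expected to be combined with packw, which packs the low 16 bits of each
+; source and sign extends the 32-bit result to 64 bits.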
+define void @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a2, a2, a3
+; RV64I-NEXT: or a0, a0, a2
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a2, a3
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a2, a2, a3
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %g, %h
+ %j = or i32 %f, %i
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a2, a2
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: or a0, a3, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_3:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %g, %h
+ %j = or i32 %f, %i
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_zext_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_zext_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %a = zext i16 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Negative test: %a isn't known to be zero extended, so we can't use packw
+; for the outer or, but we can still use packh for the high half.
+define void @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_noext_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_noext_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Make sure we can match packh+slli even when the input bytes are not zero
+; extended.
+define void @pack_i32_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_i32_lo_noext_hi_packh_nozeroext:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_i32_lo_noext_hi_packh_nozeroext:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Make sure we can match packh+slli even when the input bytes are not zero
+; extended.
+define i64 @pack_i64_lo_noext_hi_packh_nozeroext(i64 %a, i8 %1, i8 %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_i64_lo_noext_hi_packh_nozeroext:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a2, a2
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_i64_lo_noext_hi_packh_nozeroext:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i64
+ %c = zext i8 %2 to i64
+ %d = shl i64 %c, 8
+ %e = or i64 %b, %d
+ %f = shl i64 %e, 16
+ %g = or i64 %f, %a
+ ret i64 %g
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll
new file mode 100644
index 0000000..5b01976
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vploadff.ll
@@ -0,0 +1,586 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
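+; llvm.vp.load.ff is a fault-only-first load: it returns the loaded vector
+; together with an i32 holding the number of elements actually read. On
+; RISC-V it is expected to lower to vleNff.v, with the element count
+; recovered by reading the vl CSR that the fault-only-first load updates.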
+define { <2 x i8>, i32 } @vploadff_v2i8(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x i8>, i32 } %load
+}
+
+define { <2 x i8>, i32 } @vploadff_v2i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i8>, i32 } @llvm.vp.load.ff.v2i8.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x i8>, i32 } %load
+}
+
+define { <4 x i8>, i32 } @vploadff_v4i8(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i8>, i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x i8>, i32 } %load
+}
+
+define { <4 x i8>, i32 } @vploadff_v4i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i8>, i32 } @llvm.vp.load.ff.v4i8.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x i8>, i32 } %load
+}
+
+define { <8 x i8>, i32 } @vploadff_v8i8(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x i8>, i32 } %load
+}
+
+define { <8 x i8>, i32 } @vploadff_v8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i8>, i32 } @llvm.vp.load.ff.v8i8.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x i8>, i32 } %load
+}
+
+define { <2 x i16>, i32 } @vploadff_v2i16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x i16>, i32 } %load
+}
+
+define { <2 x i16>, i32 } @vploadff_v2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i16>, i32 } @llvm.vp.load.ff.v2i16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x i16>, i32 } %load
+}
+
+define { <4 x i16>, i32 } @vploadff_v4i16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x i16>, i32 } %load
+}
+
+define { <4 x i16>, i32 } @vploadff_v4i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i16>, i32 } @llvm.vp.load.ff.v4i16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x i16>, i32 } %load
+}
+
+define { <8 x i16>, i32 } @vploadff_v8i16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x i16>, i32 } %load
+}
+
+define { <8 x i16>, i32 } @vploadff_v8i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i16>, i32 } @llvm.vp.load.ff.v8i16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x i16>, i32 } %load
+}
+
+define { <2 x i32>, i32 } @vploadff_v2i32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x i32>, i32 } %load
+}
+
+define { <2 x i32>, i32 } @vploadff_v2i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i32>, i32 } @llvm.vp.load.ff.v2i32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x i32>, i32 } %load
+}
+
+define { <4 x i32>, i32 } @vploadff_v4i32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x i32>, i32 } %load
+}
+
+define { <4 x i32>, i32 } @vploadff_v4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i32>, i32 } @llvm.vp.load.ff.v4i32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x i32>, i32 } %load
+}
+
+define { <8 x i32>, i32 } @vploadff_v8i32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x i32>, i32 } %load
+}
+
+define { <8 x i32>, i32 } @vploadff_v8i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i32>, i32 } @llvm.vp.load.ff.v8i32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x i32>, i32 } %load
+}
+
+define { <2 x i64>, i32 } @vploadff_v2i64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x i64>, i32 } %load
+}
+
+define { <2 x i64>, i32 } @vploadff_v2i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x i64>, i32 } @llvm.vp.load.ff.v2i64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x i64>, i32 } %load
+}
+
+define { <4 x i64>, i32 } @vploadff_v4i64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x i64>, i32 } %load
+}
+
+define { <4 x i64>, i32 } @vploadff_v4i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x i64>, i32 } @llvm.vp.load.ff.v4i64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x i64>, i32 } %load
+}
+
+define { <8 x i64>, i32 } @vploadff_v8i64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x i64>, i32 } %load
+}
+
+define { <8 x i64>, i32 } @vploadff_v8i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x i64>, i32 } @llvm.vp.load.ff.v8i64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x i64>, i32 } %load
+}
+
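+; <32 x i64> does not fit in a single m8 register group, so the struct
+; result is returned indirectly through a0 and the EVL is clamped to 16
+; (VLMAX for e64/m8 at the minimum VLEN of 128) before the fault-only-first
+; load.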
+define { <32 x i64>, i32 } @vploadff_v32i64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v32i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a3, 16
+; CHECK-NEXT: bltu a2, a3, .LBB24_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: .LBB24_2:
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a1), v0.t
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: sw a1, 256(a0)
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %load = call { <32 x i64>, i32 } @llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> %m, i32 %evl)
+ ret { <32 x i64>, i32 } %load
+}
+
+define { <32 x i64>, i32 } @vploadff_v32i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v32i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a3, 16
+; CHECK-NEXT: bltu a2, a3, .LBB25_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a1)
+; CHECK-NEXT: csrr a1, vl
+; CHECK-NEXT: sw a1, 256(a0)
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+ %load = call { <32 x i64>, i32 } @llvm.vp.load.ff.v32i64.p0(ptr %ptr, <32 x i1> splat (i1 true), i32 %evl)
+ ret { <32 x i64>, i32 } %load
+}
+
+define { <2 x half>, i32 } @vploadff_v2f16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x half>, i32 } %load
+}
+
+define { <2 x half>, i32 } @vploadff_v2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x half>, i32 } @llvm.vp.load.ff.v2f16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x half>, i32 } %load
+}
+
+define { <4 x half>, i32 } @vploadff_v4f16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x half>, i32 } %load
+}
+
+define { <4 x half>, i32 } @vploadff_v4f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x half>, i32 } @llvm.vp.load.ff.v4f16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x half>, i32 } %load
+}
+
+define { <8 x half>, i32 } @vploadff_v8f16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x half>, i32 } %load
+}
+
+define { <8 x half>, i32 } @vploadff_v8f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x half>, i32 } @llvm.vp.load.ff.v8f16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x half>, i32 } %load
+}
+
+define { <2 x float>, i32 } @vploadff_v2f32(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x float>, i32 } %load
+}
+
+define { <2 x float>, i32 } @vploadff_v2f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x float>, i32 } @llvm.vp.load.ff.v2f32.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x float>, i32 } %load
+}
+
+define { <4 x float>, i32 } @vploadff_v4f32(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x float>, i32 } %load
+}
+
+define { <4 x float>, i32 } @vploadff_v4f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x float>, i32 } @llvm.vp.load.ff.v4f32.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x float>, i32 } %load
+}
+
+define { <8 x float>, i32 } @vploadff_v8f32(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x float>, i32 } %load
+}
+
+define { <8 x float>, i32 } @vploadff_v8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x float>, i32 } @llvm.vp.load.ff.v8f32.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x float>, i32 } %load
+}
+
+define { <2 x double>, i32 } @vploadff_v2f64(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x double>, i32 } %load
+}
+
+define { <2 x double>, i32 } @vploadff_v2f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x double>, i32 } @llvm.vp.load.ff.v2f64.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x double>, i32 } %load
+}
+
+define { <4 x double>, i32 } @vploadff_v4f64(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x double>, i32 } %load
+}
+
+define { <4 x double>, i32 } @vploadff_v4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x double>, i32 } @llvm.vp.load.ff.v4f64.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x double>, i32 } %load
+}
+
+define { <8 x double>, i32 } @vploadff_v8f64(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x double>, i32 } @llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x double>, i32 } %load
+}
+
+define { <8 x double>, i32 } @vploadff_v8f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x double>, i32 } @llvm.vp.load.ff.v8f64.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x double>, i32 } %load
+}
+
+define { <2 x bfloat>, i32 } @vploadff_v2bf16(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> %m, i32 %evl)
+ ret { <2 x bfloat>, i32 } %load
+}
+
+define { <2 x bfloat>, i32 } @vploadff_v2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v2bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <2 x bfloat>, i32 } @llvm.vp.load.ff.v2bf16.p0(ptr %ptr, <2 x i1> splat (i1 true), i32 %evl)
+ ret { <2 x bfloat>, i32 } %load
+}
+
+define { <4 x bfloat>, i32 } @vploadff_v4bf16(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> %m, i32 %evl)
+ ret { <4 x bfloat>, i32 } %load
+}
+
+define { <4 x bfloat>, i32 } @vploadff_v4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v4bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <4 x bfloat>, i32 } @llvm.vp.load.ff.v4bf16.p0(ptr %ptr, <4 x i1> splat (i1 true), i32 %evl)
+ ret { <4 x bfloat>, i32 } %load
+}
+
+define { <8 x bfloat>, i32 } @vploadff_v8bf16(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> %m, i32 %evl)
+ ret { <8 x bfloat>, i32 } %load
+}
+
+define { <8 x bfloat>, i32 } @vploadff_v8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v8bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <8 x bfloat>, i32 } @llvm.vp.load.ff.v8bf16.p0(ptr %ptr, <8 x i1> splat (i1 true), i32 %evl)
+ ret { <8 x bfloat>, i32 } %load
+}
+
+define { <7 x i8>, i32 } @vploadff_v7i8(ptr %ptr, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_v7i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <7 x i8>, i32 } @llvm.vp.load.ff.v7i8.p0(ptr %ptr, <7 x i1> %m, i32 %evl)
+ ret { <7 x i8>, i32 } %load
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vploadff.ll b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll
new file mode 100644
index 0000000..9e08938
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vploadff.ll
@@ -0,0 +1,1008 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
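+; Scalable-vector versions of the fault-only-first load tests: each
+; llvm.vp.load.ff call should lower to a single vleNff.v followed by a read
+; of vl to produce the element-count result.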
+define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x i8>, i32 } %load
+}
+
+define { <vscale x 1 x i8>, i32 } @vploadff_nxv1i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i8>, i32 } @llvm.vp.load.ff.nxv1i8.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x i8>, i32 } %load
+}
+
+define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x i8>, i32 } %load
+}
+
+define { <vscale x 2 x i8>, i32 } @vploadff_nxv2i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i8>, i32 } @llvm.vp.load.ff.nxv2i8.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x i8>, i32 } %load
+}
+
+define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x i8>, i32 } %load
+}
+
+define { <vscale x 4 x i8>, i32 } @vploadff_nxv4i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i8>, i32 } @llvm.vp.load.ff.nxv4i8.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x i8>, i32 } %load
+}
+
+define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x i8>, i32 } %load
+}
+
+define { <vscale x 8 x i8>, i32 } @vploadff_nxv8i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i8>, i32 } @llvm.vp.load.ff.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x i8>, i32 } %load
+}
+
+define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x i8>, i32 } %load
+}
+
+define { <vscale x 16 x i8>, i32 } @vploadff_nxv16i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i8>, i32 } @llvm.vp.load.ff.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x i8>, i32 } %load
+}
+
+define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
+ ret { <vscale x 32 x i8>, i32 } %load
+}
+
+define { <vscale x 32 x i8>, i32 } @vploadff_nxv32i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x i8>, i32 } @llvm.vp.load.ff.nxv32i8.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 32 x i8>, i32 } %load
+}
+
+define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8(ptr %ptr, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> %m, i32 %evl)
+ ret { <vscale x 64 x i8>, i32 } %load
+}
+
+define { <vscale x 64 x i8>, i32 } @vploadff_nxv64i8_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv64i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 64 x i8>, i32 } @llvm.vp.load.ff.nxv64i8.p0(ptr %ptr, <vscale x 64 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 64 x i8>, i32 } %load
+}
+
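+; nxv128i8 is twice the size of an m8 register group, so the EVL is first
+; clamped to 8*vlenb (VLMAX for e8/m8); the test writes the element count
+; out through %evl_out rather than returning the oversized struct directly.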
+define <vscale x 128 x i8> @vploadff_nxv128i8(ptr %ptr, ptr %evl_out, <vscale x 128 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv128i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: bltu a2, a3, .LBB14_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB14_2:
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a1)
+; CHECK-NEXT: ret
+ %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> %m, i32 %evl)
+ %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0
+ %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1
+ store i32 %result1, ptr %evl_out
+ ret <vscale x 128 x i8> %result0
+}
+
+define <vscale x 128 x i8> @vploadff_nxv128i8_allones_mask(ptr %ptr, ptr %evl_out, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv128i8_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 3
+; CHECK-NEXT: bltu a2, a3, .LBB15_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB15_2:
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: sw a0, 0(a1)
+; CHECK-NEXT: ret
+ %load = call { <vscale x 128 x i8>, i32 } @llvm.vp.load.ff.nxv128i8.p0(ptr %ptr, <vscale x 128 x i1> splat (i1 true), i32 %evl)
+ %result0 = extractvalue { <vscale x 128 x i8>, i32 } %load, 0
+ %result1 = extractvalue { <vscale x 128 x i8>, i32 } %load, 1
+ store i32 %result1, ptr %evl_out
+ ret <vscale x 128 x i8> %result0
+}
+
+define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x i16>, i32 } %load
+}
+
+define { <vscale x 1 x i16>, i32 } @vploadff_nxv1i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i16>, i32 } @llvm.vp.load.ff.nxv1i16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x i16>, i32 } %load
+}
+
+define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x i16>, i32 } %load
+}
+
+define { <vscale x 2 x i16>, i32 } @vploadff_nxv2i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i16>, i32 } @llvm.vp.load.ff.nxv2i16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x i16>, i32 } %load
+}
+
+define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x i16>, i32 } %load
+}
+
+define { <vscale x 4 x i16>, i32 } @vploadff_nxv4i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i16>, i32 } @llvm.vp.load.ff.nxv4i16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x i16>, i32 } %load
+}
+
+define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x i16>, i32 } %load
+}
+
+define { <vscale x 8 x i16>, i32 } @vploadff_nxv8i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i16>, i32 } @llvm.vp.load.ff.nxv8i16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x i16>, i32 } %load
+}
+
+define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x i16>, i32 } %load
+}
+
+define { <vscale x 16 x i16>, i32 } @vploadff_nxv16i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i16>, i32 } @llvm.vp.load.ff.nxv16i16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x i16>, i32 } %load
+}
+
+define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
+ ret { <vscale x 32 x i16>, i32 } %load
+}
+
+define { <vscale x 32 x i16>, i32 } @vploadff_nxv32i16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32i16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x i16>, i32 } @llvm.vp.load.ff.nxv32i16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 32 x i16>, i32 } %load
+}
+
+define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x i32>, i32 } %load
+}
+
+define { <vscale x 1 x i32>, i32 } @vploadff_nxv1i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i32>, i32 } @llvm.vp.load.ff.nxv1i32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x i32>, i32 } %load
+}
+
+define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x i32>, i32 } %load
+}
+
+define { <vscale x 2 x i32>, i32 } @vploadff_nxv2i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i32>, i32 } @llvm.vp.load.ff.nxv2i32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x i32>, i32 } %load
+}
+
+define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x i32>, i32 } %load
+}
+
+define { <vscale x 4 x i32>, i32 } @vploadff_nxv4i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i32>, i32 } @llvm.vp.load.ff.nxv4i32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x i32>, i32 } %load
+}
+
+define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x i32>, i32 } %load
+}
+
+define { <vscale x 8 x i32>, i32 } @vploadff_nxv8i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i32>, i32 } @llvm.vp.load.ff.nxv8i32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x i32>, i32 } %load
+}
+
+define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x i32>, i32 } %load
+}
+
+define { <vscale x 16 x i32>, i32 } @vploadff_nxv16i32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16i32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x i32>, i32 } @llvm.vp.load.ff.nxv16i32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x i32>, i32 } %load
+}
+
+define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x i64>, i32 } %load
+}
+
+define { <vscale x 1 x i64>, i32 } @vploadff_nxv1i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x i64>, i32 } @llvm.vp.load.ff.nxv1i64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x i64>, i32 } %load
+}
+
+define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x i64>, i32 } %load
+}
+
+define { <vscale x 2 x i64>, i32 } @vploadff_nxv2i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x i64>, i32 } @llvm.vp.load.ff.nxv2i64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x i64>, i32 } %load
+}
+
+define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x i64>, i32 } %load
+}
+
+define { <vscale x 4 x i64>, i32 } @vploadff_nxv4i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x i64>, i32 } @llvm.vp.load.ff.nxv4i64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x i64>, i32 } %load
+}
+
+define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x i64>, i32 } %load
+}
+
+define { <vscale x 8 x i64>, i32 } @vploadff_nxv8i64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8i64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x i64>, i32 } @llvm.vp.load.ff.nxv8i64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x i64>, i32 } %load
+}
+
+define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x half>, i32 } %load
+}
+
+define { <vscale x 1 x half>, i32 } @vploadff_nxv1f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x half>, i32 } @llvm.vp.load.ff.nxv1f16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x half>, i32 } %load
+}
+
+define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x half>, i32 } %load
+}
+
+define { <vscale x 2 x half>, i32 } @vploadff_nxv2f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x half>, i32 } @llvm.vp.load.ff.nxv2f16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x half>, i32 } %load
+}
+
+define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x half>, i32 } %load
+}
+
+define { <vscale x 4 x half>, i32 } @vploadff_nxv4f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x half>, i32 } @llvm.vp.load.ff.nxv4f16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x half>, i32 } %load
+}
+
+define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x half>, i32 } %load
+}
+
+define { <vscale x 8 x half>, i32 } @vploadff_nxv8f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x half>, i32 } @llvm.vp.load.ff.nxv8f16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x half>, i32 } %load
+}
+
+define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x half>, i32 } %load
+}
+
+define { <vscale x 16 x half>, i32 } @vploadff_nxv16f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x half>, i32 } @llvm.vp.load.ff.nxv16f16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x half>, i32 } %load
+}
+
+define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
+ ret { <vscale x 32 x half>, i32 } %load
+}
+
+define { <vscale x 32 x half>, i32 } @vploadff_nxv32f16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32f16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x half>, i32 } @llvm.vp.load.ff.nxv32f16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 32 x half>, i32 } %load
+}
+
+define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x float>, i32 } %load
+}
+
+define { <vscale x 1 x float>, i32 } @vploadff_nxv1f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x float>, i32 } @llvm.vp.load.ff.nxv1f32.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x float>, i32 } %load
+}
+
+define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x float>, i32 } %load
+}
+
+define { <vscale x 2 x float>, i32 } @vploadff_nxv2f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x float>, i32 } @llvm.vp.load.ff.nxv2f32.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x float>, i32 } %load
+}
+
+define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x float>, i32 } %load
+}
+
+define { <vscale x 4 x float>, i32 } @vploadff_nxv4f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x float>, i32 } @llvm.vp.load.ff.nxv4f32.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x float>, i32 } %load
+}
+
+define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x float>, i32 } %load
+}
+
+define { <vscale x 8 x float>, i32 } @vploadff_nxv8f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x float>, i32 } @llvm.vp.load.ff.nxv8f32.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x float>, i32 } %load
+}
+
+define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x float>, i32 } %load
+}
+
+define { <vscale x 16 x float>, i32 } @vploadff_nxv16f32_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16f32_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vle32ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x float>, i32 } @llvm.vp.load.ff.nxv16f32.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x float>, i32 } %load
+}
+
+define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x double>, i32 } %load
+}
+
+define { <vscale x 1 x double>, i32 } @vploadff_nxv1f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x double>, i32 } @llvm.vp.load.ff.nxv1f64.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x double>, i32 } %load
+}
+
+define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x double>, i32 } %load
+}
+
+define { <vscale x 2 x double>, i32 } @vploadff_nxv2f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x double>, i32 } @llvm.vp.load.ff.nxv2f64.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x double>, i32 } %load
+}
+
+define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x double>, i32 } %load
+}
+
+define { <vscale x 4 x double>, i32 } @vploadff_nxv4f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x double>, i32 } @llvm.vp.load.ff.nxv4f64.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x double>, i32 } %load
+}
+
+define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x double>, i32 } %load
+}
+
+define { <vscale x 8 x double>, i32 } @vploadff_nxv8f64_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8f64_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vle64ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x double>, i32 } @llvm.vp.load.ff.nxv8f64.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x double>, i32 } %load
+}
+
+define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> %m, i32 %evl)
+ ret { <vscale x 1 x bfloat>, i32 } %load
+}
+
+define { <vscale x 1 x bfloat>, i32 } @vploadff_nxv1bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv1bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 1 x bfloat>, i32 } @llvm.vp.load.ff.nxv1bf16.p0(ptr %ptr, <vscale x 1 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 1 x bfloat>, i32 } %load
+}
+
+define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> %m, i32 %evl)
+ ret { <vscale x 2 x bfloat>, i32 } %load
+}
+
+define { <vscale x 2 x bfloat>, i32 } @vploadff_nxv2bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv2bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 2 x bfloat>, i32 } @llvm.vp.load.ff.nxv2bf16.p0(ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 2 x bfloat>, i32 } %load
+}
+
+define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> %m, i32 %evl)
+ ret { <vscale x 4 x bfloat>, i32 } %load
+}
+
+define { <vscale x 4 x bfloat>, i32 } @vploadff_nxv4bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv4bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 4 x bfloat>, i32 } @llvm.vp.load.ff.nxv4bf16.p0(ptr %ptr, <vscale x 4 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 4 x bfloat>, i32 } %load
+}
+
+define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> %m, i32 %evl)
+ ret { <vscale x 8 x bfloat>, i32 } %load
+}
+
+define { <vscale x 8 x bfloat>, i32 } @vploadff_nxv8bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv8bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 8 x bfloat>, i32 } @llvm.vp.load.ff.nxv8bf16.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 8 x bfloat>, i32 } %load
+}
+
+define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16(ptr %ptr, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
+ ret { <vscale x 16 x bfloat>, i32 } %load
+}
+
+define { <vscale x 16 x bfloat>, i32 } @vploadff_nxv16bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv16bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 16 x bfloat>, i32 } @llvm.vp.load.ff.nxv16bf16.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 16 x bfloat>, i32 } %load
+}
+
+define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16(ptr %ptr, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> %m, i32 %evl)
+ ret { <vscale x 32 x bfloat>, i32 } %load
+}
+
+define { <vscale x 32 x bfloat>, i32 } @vploadff_nxv32bf16_allones_mask(ptr %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv32bf16_allones_mask:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vle16ff.v v8, (a0)
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 32 x bfloat>, i32 } @llvm.vp.load.ff.nxv32bf16.p0(ptr %ptr, <vscale x 32 x i1> splat (i1 true), i32 %evl)
+ ret { <vscale x 32 x bfloat>, i32 } %load
+}
+
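+; The non-power-of-two <vscale x 3 x i8> type is widened to <vscale x 4 x i8>,
+; so the fault-only-first load below uses that type's vtype (e8, mf2).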
+define { <vscale x 3 x i8>, i32 } @vploadff_nxv3i8(ptr %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vploadff_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vle8ff.v v8, (a0), v0.t
+; CHECK-NEXT: csrr a0, vl
+; CHECK-NEXT: ret
+ %load = call { <vscale x 3 x i8>, i32 } @llvm.vp.load.ff.nxv3i8.p0(ptr %ptr, <vscale x 3 x i1> %m, i32 %evl)
+ ret { <vscale x 3 x i8>, i32 } %load
+}
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index c9c49e8..cb046cd 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -204,18 +204,16 @@ define i64 @load_i64(ptr %p) {
; RV64IZBKB-NEXT: lbu a2, 5(a0)
; RV64IZBKB-NEXT: lbu a3, 6(a0)
; RV64IZBKB-NEXT: lbu a4, 7(a0)
-; RV64IZBKB-NEXT: lbu a5, 0(a0)
-; RV64IZBKB-NEXT: lbu a6, 1(a0)
-; RV64IZBKB-NEXT: lbu a7, 2(a0)
-; RV64IZBKB-NEXT: lbu a0, 3(a0)
+; RV64IZBKB-NEXT: lbu a5, 1(a0)
+; RV64IZBKB-NEXT: lbu a6, 2(a0)
+; RV64IZBKB-NEXT: lbu a7, 3(a0)
+; RV64IZBKB-NEXT: lbu a0, 0(a0)
+; RV64IZBKB-NEXT: packh a3, a3, a4
; RV64IZBKB-NEXT: packh a1, a1, a2
-; RV64IZBKB-NEXT: packh a2, a3, a4
-; RV64IZBKB-NEXT: packh a3, a5, a6
-; RV64IZBKB-NEXT: packh a0, a7, a0
-; RV64IZBKB-NEXT: slli a2, a2, 16
-; RV64IZBKB-NEXT: slli a0, a0, 16
-; RV64IZBKB-NEXT: or a1, a2, a1
-; RV64IZBKB-NEXT: or a0, a0, a3
+; RV64IZBKB-NEXT: packh a2, a6, a7
+; RV64IZBKB-NEXT: packh a0, a0, a5
+; RV64IZBKB-NEXT: packw a1, a1, a3
+; RV64IZBKB-NEXT: packw a0, a0, a2
; RV64IZBKB-NEXT: pack a0, a0, a1
; RV64IZBKB-NEXT: ret
;
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
new file mode 100644
index 0000000..00e9185
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
@@ -0,0 +1,75 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-vulkan1.3-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
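+; The handlefromimplicitbinding calls below are assigned the lowest Binding
+; numbers still unused in their DescriptorSet, filling the gaps left by the
+; explicit handlefrombinding calls (see the CHECK-DAG decorations).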
+
+@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1
+@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1
+@.str.4 = private unnamed_addr constant [2 x i8] c"d\00", align 1
+@.str.6 = private unnamed_addr constant [2 x i8] c"e\00", align 1
+@.str.8 = private unnamed_addr constant [2 x i8] c"f\00", align 1
+@.str.10 = private unnamed_addr constant [2 x i8] c"g\00", align 1
+@.str.12 = private unnamed_addr constant [2 x i8] c"h\00", align 1
+@.str.14 = private unnamed_addr constant [2 x i8] c"i\00", align 1
+
+; CHECK-DAG: OpName [[b:%[0-9]+]] "b"
+; CHECK-DAG: OpName [[c:%[0-9]+]] "c"
+; CHECK-DAG: OpName [[d:%[0-9]+]] "d"
+; CHECK-DAG: OpName [[e:%[0-9]+]] "e"
+; CHECK-DAG: OpName [[f:%[0-9]+]] "f"
+; CHECK-DAG: OpName [[g:%[0-9]+]] "g"
+; CHECK-DAG: OpName [[h:%[0-9]+]] "h"
+; CHECK-DAG: OpName [[i:%[0-9]+]] "i"
+; CHECK-DAG: OpDecorate [[b]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[b]] Binding 1
+; CHECK-DAG: OpDecorate [[c]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[c]] Binding 0
+; CHECK-DAG: OpDecorate [[d]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[d]] Binding 3
+; CHECK-DAG: OpDecorate [[e]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[e]] Binding 2
+; CHECK-DAG: OpDecorate [[f]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[f]] Binding 1
+; CHECK-DAG: OpDecorate [[g]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[g]] Binding 0
+; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[h]] Binding 3
+; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[i]] Binding 2
+
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %1 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %2 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 1, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.4)
+ %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.6)
+ %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str.8)
+ %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.10)
+ %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.12)
+ %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.14)
+ %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
+ %9 = load i32, ptr addrspace(11) %8, align 4
+ %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
+ %11 = load i32, ptr addrspace(11) %10, align 4
+ %add.i = add nsw i32 %11, %9
+ %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ %13 = load i32, ptr addrspace(11) %12, align 4
+ %add4.i = add nsw i32 %add.i, %13
+ %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
+ %15 = load i32, ptr addrspace(11) %14, align 4
+ %add6.i = add nsw i32 %add4.i, %15
+ %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
+ %17 = load i32, ptr addrspace(11) %16, align 4
+ %add8.i = add nsw i32 %add6.i, %17
+ %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
+ %19 = load i32, ptr addrspace(11) %18, align 4
+ %add10.i = add nsw i32 %add8.i, %19
+ %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
+ %21 = load i32, ptr addrspace(11) %20, align 4
+ %add12.i = add nsw i32 %add10.i, %21
+ %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ store i32 %add12.i, ptr addrspace(11) %22, align 4
+ ret void
+}
+
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
\ No newline at end of file
diff --git a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll
index 5c5f704..6b07891 100644
--- a/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll
+++ b/llvm/test/CodeGen/X86/trunc-nsw-nuw.ll
@@ -62,10 +62,11 @@ entry:
define i32 @simplify_demanded_bits_drop_flag(i1 zeroext %x, i1 zeroext %y) nounwind {
; CHECK-LABEL: simplify_demanded_bits_drop_flag:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: negl %edi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
; CHECK-NEXT: shll $2, %esi
-; CHECK-NEXT: xorl %edi, %esi
-; CHECK-NEXT: movslq %esi, %rax
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: negq %rax
+; CHECK-NEXT: xorq %rsi, %rax
; CHECK-NEXT: imulq $-1634202141, %rax, %rax # imm = 0x9E980DE3
; CHECK-NEXT: movq %rax, %rcx
; CHECK-NEXT: shrq $63, %rcx
diff --git a/llvm/test/CodeGen/X86/xray-custom-log.ll b/llvm/test/CodeGen/X86/xray-custom-log.ll
index 8f23055..f4cdc23 100644
--- a/llvm/test/CodeGen/X86/xray-custom-log.ll
+++ b/llvm/test/CodeGen/X86/xray-custom-log.ll
@@ -1,9 +1,6 @@
; RUN: llc -mtriple=x86_64 < %s | FileCheck %s
; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | FileCheck %s --check-prefix=PIC
-; RUN: llc -mtriple=x86_64 -filetype=obj %s -o %t
-; RUN: llvm-dwarfdump %t | FileCheck %s --check-prefix=DBG
-
define i32 @customevent() nounwind "function-instrument"="xray-always" !dbg !1 {
%eventptr = alloca i8
%eventsize = alloca i64
@@ -93,17 +90,6 @@ define void @leaf_func() "function-instrument"="xray-always" "frame-pointer"="no
declare void @llvm.xray.customevent(ptr, i64)
declare void @llvm.xray.typedevent(i64, ptr, i64)
-;; Construct call site entries for PATCHABLE_EVENT_CALL.
-; DBG: DW_TAG_subprogram
-; DBG: DW_TAG_call_site
-; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}})
-; DBG-NEXT: DW_AT_call_return_pc
-
-; DBG: DW_TAG_subprogram
-; DBG: DW_TAG_call_site
-; DBG-NEXT: DW_AT_call_target (DW_OP_reg{{.*}})
-; DBG-NEXT: DW_AT_call_return_pc
-
!llvm.dbg.cu = !{!7}
!llvm.module.flags = !{!10, !11}
diff --git a/llvm/test/CodeGen/Xtensa/atomic-load-store.ll b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll
new file mode 100644
index 0000000..bd843a3
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/atomic-load-store.ll
@@ -0,0 +1,498 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA
+; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC
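+; Without the s32c1i feature, atomic loads and stores are lowered to
+; __atomic_* libcalls (the movi a11/a12 immediates pass the memory ordering);
+; with it, they become plain l8ui/l16ui/l32i and s8i/s16i/s32i accesses,
+; with memw barriers inserted for acquire, release, and seq_cst orderings.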
+
+define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i8_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI0_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i8_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i8, ptr %a unordered, align 1
+ ret i8 %1
+}
+
+define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI1_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i8, ptr %a monotonic, align 1
+ ret i8 %1
+}
+
+define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 2
+; XTENSA-NEXT: l32r a8, .LCPI2_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i8, ptr %a acquire, align 1
+ ret i8 %1
+}
+
+define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI3_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i8, ptr %a seq_cst, align 1
+ ret i8 %1
+}
+
+define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i16_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI4_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i16_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i16, ptr %a unordered, align 2
+ ret i16 %1
+}
+
+define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI5_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i16, ptr %a monotonic, align 2
+ ret i16 %1
+}
+
+define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 2
+; XTENSA-NEXT: l32r a8, .LCPI6_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i16, ptr %a acquire, align 2
+ ret i16 %1
+}
+
+define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI7_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i16, ptr %a seq_cst, align 2
+ ret i16 %1
+}
+
+define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i32_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI8_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i32_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i32, ptr %a unordered, align 4
+ ret i32 %1
+}
+
+define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI9_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i32, ptr %a monotonic, align 4
+ ret i32 %1
+}
+
+define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 2
+; XTENSA-NEXT: l32r a8, .LCPI10_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i32, ptr %a acquire, align 4
+ ret i32 %1
+}
+
+define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
+; XTENSA-LABEL: atomic_load_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI11_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_load_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %1 = load atomic i32, ptr %a seq_cst, align 4
+ ret i32 %1
+}
+
+define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i8_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI12_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i8_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i8 %b, ptr %a unordered, align 1
+ ret void
+}
+
+define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI13_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i8 %b, ptr %a monotonic, align 1
+ ret void
+}
+
+define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI14_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i8 %b, ptr %a release, align 1
+ ret void
+}
+
+define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI15_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s8i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i8 %b, ptr %a seq_cst, align 1
+ ret void
+}
+
+define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i16_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI16_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i16_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i16 %b, ptr %a unordered, align 2
+ ret void
+}
+
+define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI17_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i16 %b, ptr %a monotonic, align 2
+ ret void
+}
+
+define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI18_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i16 %b, ptr %a release, align 2
+ ret void
+}
+
+define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI19_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s16i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i16 %b, ptr %a seq_cst, align 2
+ ret void
+}
+
+define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i32_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI20_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i32_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 %b, ptr %a unordered, align 4
+ ret void
+}
+
+define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI21_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 %b, ptr %a monotonic, align 4
+ ret void
+}
+
+define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI22_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 %b, ptr %a release, align 4
+ ret void
+}
+
+define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomic_store_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI23_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomic_store_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: s32i a3, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 %b, ptr %a seq_cst, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/Xtensa/atomic-rmw.ll b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll
new file mode 100644
index 0000000..81cb2dd
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/atomic-rmw.ll
@@ -0,0 +1,10298 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA
+; RUN: llc -mtriple=xtensa -mattr=+windowed,s32c1i < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC
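+; Without s32c1i, atomicrmw operations are lowered to __atomic_* libcalls.
+; With it, sub-word operations are expanded to a compare-and-swap loop on the
+; containing aligned word: the operand is shifted into place, and s32c1i with
+; scompare1 retries until the masked update succeeds.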
+
+define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI0_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB0_2
+; XTENSA-ATOMIC-NEXT: .LBB0_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB0_4
+; XTENSA-ATOMIC-NEXT: .LBB0_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB0_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB0_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB0_1
+; XTENSA-ATOMIC-NEXT: .LBB0_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI1_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB1_2
+; XTENSA-ATOMIC-NEXT: .LBB1_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB1_4
+; XTENSA-ATOMIC-NEXT: .LBB1_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB1_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB1_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB1_1
+; XTENSA-ATOMIC-NEXT: .LBB1_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI2_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB2_2
+; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB2_4
+; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB2_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB2_1
+; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI3_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB3_2
+; XTENSA-ATOMIC-NEXT: .LBB3_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB3_4
+; XTENSA-ATOMIC-NEXT: .LBB3_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB3_1
+; XTENSA-ATOMIC-NEXT: .LBB3_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI4_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB4_2
+; XTENSA-ATOMIC-NEXT: .LBB4_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB4_4
+; XTENSA-ATOMIC-NEXT: .LBB4_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB4_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB4_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB4_1
+; XTENSA-ATOMIC-NEXT: .LBB4_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI5_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB5_2
+; XTENSA-ATOMIC-NEXT: .LBB5_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB5_4
+; XTENSA-ATOMIC-NEXT: .LBB5_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB5_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB5_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB5_1
+; XTENSA-ATOMIC-NEXT: .LBB5_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI6_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB6_2
+; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4
+; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB6_1
+; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI7_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB7_2
+; XTENSA-ATOMIC-NEXT: .LBB7_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB7_4
+; XTENSA-ATOMIC-NEXT: .LBB7_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB7_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB7_1
+; XTENSA-ATOMIC-NEXT: .LBB7_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI8_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB8_2
+; XTENSA-ATOMIC-NEXT: .LBB8_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB8_4
+; XTENSA-ATOMIC-NEXT: .LBB8_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB8_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB8_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB8_1
+; XTENSA-ATOMIC-NEXT: .LBB8_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI9_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB9_2
+; XTENSA-ATOMIC-NEXT: .LBB9_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB9_4
+; XTENSA-ATOMIC-NEXT: .LBB9_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB9_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB9_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB9_1
+; XTENSA-ATOMIC-NEXT: .LBB9_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI10_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB10_2
+; XTENSA-ATOMIC-NEXT: .LBB10_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB10_4
+; XTENSA-ATOMIC-NEXT: .LBB10_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB10_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB10_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB10_1
+; XTENSA-ATOMIC-NEXT: .LBB10_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI11_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB11_2
+; XTENSA-ATOMIC-NEXT: .LBB11_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB11_4
+; XTENSA-ATOMIC-NEXT: .LBB11_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB11_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB11_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB11_1
+; XTENSA-ATOMIC-NEXT: .LBB11_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI12_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB12_2
+; XTENSA-ATOMIC-NEXT: .LBB12_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB12_4
+; XTENSA-ATOMIC-NEXT: .LBB12_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB12_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB12_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB12_1
+; XTENSA-ATOMIC-NEXT: .LBB12_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI13_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB13_2
+; XTENSA-ATOMIC-NEXT: .LBB13_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB13_4
+; XTENSA-ATOMIC-NEXT: .LBB13_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB13_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB13_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB13_1
+; XTENSA-ATOMIC-NEXT: .LBB13_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI14_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB14_2
+; XTENSA-ATOMIC-NEXT: .LBB14_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB14_4
+; XTENSA-ATOMIC-NEXT: .LBB14_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB14_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB14_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB14_1
+; XTENSA-ATOMIC-NEXT: .LBB14_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI15_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB15_2
+; XTENSA-ATOMIC-NEXT: .LBB15_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB15_4
+; XTENSA-ATOMIC-NEXT: .LBB15_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB15_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB15_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB15_1
+; XTENSA-ATOMIC-NEXT: .LBB15_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI16_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB16_2
+; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB16_4
+; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB16_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB16_1
+; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI17_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB17_2
+; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB17_4
+; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB17_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB17_1
+; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI18_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB18_2
+; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB18_4
+; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB18_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB18_1
+; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI19_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB19_2
+; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB19_4
+; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB19_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB19_1
+; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI20_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB20_2
+; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB20_4
+; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB20_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB20_1
+; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI21_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB21_2
+; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB21_4
+; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB21_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB21_1
+; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI22_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB22_2
+; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB22_4
+; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB22_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB22_1
+; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI23_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB23_2
+; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB23_4
+; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB23_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB23_1
+; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI24_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a10, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB24_2
+; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB24_4
+; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB24_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB24_1
+; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI25_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB25_2
+; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB25_4
+; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB25_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB25_1
+; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI26_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB26_2
+; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB26_4
+; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB26_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB26_1
+; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI27_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB27_2
+; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB27_4
+; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB27_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB27_1
+; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI28_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB28_2
+; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB28_4
+; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB28_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB28_1
+; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI29_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB29_2
+; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB29_4
+; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB29_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB29_1
+; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI30_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB30_2
+; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB30_4
+; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB30_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB30_1
+; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI31_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB31_2
+; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB31_4
+; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB31_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB31_1
+; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI32_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB32_2
+; XTENSA-ATOMIC-NEXT: .LBB32_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB32_4
+; XTENSA-ATOMIC-NEXT: .LBB32_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB32_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB32_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB32_1
+; XTENSA-ATOMIC-NEXT: .LBB32_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI33_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB33_2
+; XTENSA-ATOMIC-NEXT: .LBB33_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB33_4
+; XTENSA-ATOMIC-NEXT: .LBB33_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB33_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB33_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB33_1
+; XTENSA-ATOMIC-NEXT: .LBB33_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI34_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 255
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB34_2
+; XTENSA-ATOMIC-NEXT: .LBB34_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB34_4
+; XTENSA-ATOMIC-NEXT: .LBB34_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB34_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB34_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB34_1
+; XTENSA-ATOMIC-NEXT: .LBB34_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
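+; The signed min/max tests below have no single-instruction lowering on
+; Xtensa. The XTENSA run expands to a loop around an indirect call loaded
+; from the constant pool (presumably a compare-exchange libcall), while the
+; XTENSA-ATOMIC run emits a word-aligned s32c1i compare-and-swap loop that
+; sign-extends the selected byte (slli/srai by 24) before the signed
+; comparison. As in the xor tests above, acquire orderings end with a memw
+; barrier, release orderings place one before the initial load, and
+; acq_rel/seq_cst use both; monotonic needs neither.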
+define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a4, .LCPI35_0
+; XTENSA-NEXT: j .LBB35_2
+; XTENSA-NEXT: .LBB35_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB35_4
+; XTENSA-NEXT: .LBB35_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB35_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB35_1
+; XTENSA-NEXT: .LBB35_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB35_2
+; XTENSA-ATOMIC-NEXT: .LBB35_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB35_6
+; XTENSA-ATOMIC-NEXT: .LBB35_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB35_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB35_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB35_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB35_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB35_1
+; XTENSA-ATOMIC-NEXT: .LBB35_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a4, .LCPI36_0
+; XTENSA-NEXT: j .LBB36_2
+; XTENSA-NEXT: .LBB36_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB36_4
+; XTENSA-NEXT: .LBB36_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB36_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB36_1
+; XTENSA-NEXT: .LBB36_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB36_2
+; XTENSA-ATOMIC-NEXT: .LBB36_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB36_6
+; XTENSA-ATOMIC-NEXT: .LBB36_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB36_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB36_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB36_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB36_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB36_1
+; XTENSA-ATOMIC-NEXT: .LBB36_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l8ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 24
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a5, .LCPI37_0
+; XTENSA-NEXT: j .LBB37_2
+; XTENSA-NEXT: .LBB37_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB37_4
+; XTENSA-NEXT: .LBB37_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bge a4, a8, .LBB37_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB37_1
+; XTENSA-NEXT: .LBB37_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB37_2
+; XTENSA-ATOMIC-NEXT: .LBB37_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB37_6
+; XTENSA-ATOMIC-NEXT: .LBB37_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB37_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB37_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB37_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB37_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB37_1
+; XTENSA-ATOMIC-NEXT: .LBB37_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l8ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 24
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a5, .LCPI38_0
+; XTENSA-NEXT: j .LBB38_2
+; XTENSA-NEXT: .LBB38_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB38_4
+; XTENSA-NEXT: .LBB38_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bge a4, a8, .LBB38_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB38_1
+; XTENSA-NEXT: .LBB38_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB38_2
+; XTENSA-ATOMIC-NEXT: .LBB38_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB38_6
+; XTENSA-ATOMIC-NEXT: .LBB38_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB38_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB38_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB38_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB38_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB38_1
+; XTENSA-ATOMIC-NEXT: .LBB38_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI39_0
+; XTENSA-NEXT: j .LBB39_2
+; XTENSA-NEXT: .LBB39_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB39_4
+; XTENSA-NEXT: .LBB39_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB39_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB39_1
+; XTENSA-NEXT: .LBB39_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB39_2
+; XTENSA-ATOMIC-NEXT: .LBB39_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB39_6
+; XTENSA-ATOMIC-NEXT: .LBB39_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a12, a5, .LBB39_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB39_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB39_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB39_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB39_1
+; XTENSA-ATOMIC-NEXT: .LBB39_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
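+; The min tests mirror the max tests above with the signed comparison
+; inverted (blt in place of bge).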
+define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a4, .LCPI40_0
+; XTENSA-NEXT: j .LBB40_2
+; XTENSA-NEXT: .LBB40_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB40_4
+; XTENSA-NEXT: .LBB40_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB40_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB40_1
+; XTENSA-NEXT: .LBB40_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB40_2
+; XTENSA-ATOMIC-NEXT: .LBB40_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB40_6
+; XTENSA-ATOMIC-NEXT: .LBB40_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB40_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB40_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB40_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB40_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB40_1
+; XTENSA-ATOMIC-NEXT: .LBB40_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a4, .LCPI41_0
+; XTENSA-NEXT: j .LBB41_2
+; XTENSA-NEXT: .LBB41_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB41_4
+; XTENSA-NEXT: .LBB41_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB41_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB41_1
+; XTENSA-NEXT: .LBB41_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB41_2
+; XTENSA-ATOMIC-NEXT: .LBB41_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB41_6
+; XTENSA-ATOMIC-NEXT: .LBB41_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB41_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB41_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB41_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB41_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB41_1
+; XTENSA-ATOMIC-NEXT: .LBB41_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l8ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 24
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a5, .LCPI42_0
+; XTENSA-NEXT: j .LBB42_2
+; XTENSA-NEXT: .LBB42_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB42_4
+; XTENSA-NEXT: .LBB42_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: blt a4, a8, .LBB42_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB42_1
+; XTENSA-NEXT: .LBB42_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB42_2
+; XTENSA-ATOMIC-NEXT: .LBB42_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB42_6
+; XTENSA-ATOMIC-NEXT: .LBB42_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB42_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB42_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB42_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB42_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB42_1
+; XTENSA-ATOMIC-NEXT: .LBB42_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l8ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 24
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a5, .LCPI43_0
+; XTENSA-NEXT: j .LBB43_2
+; XTENSA-NEXT: .LBB43_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB43_4
+; XTENSA-NEXT: .LBB43_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: blt a4, a8, .LBB43_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB43_1
+; XTENSA-NEXT: .LBB43_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB43_2
+; XTENSA-ATOMIC-NEXT: .LBB43_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB43_6
+; XTENSA-ATOMIC-NEXT: .LBB43_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB43_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB43_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB43_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB43_1
+; XTENSA-ATOMIC-NEXT: .LBB43_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l8ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 24
+; XTENSA-NEXT: srai a5, a8, 24
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI44_0
+; XTENSA-NEXT: j .LBB44_2
+; XTENSA-NEXT: .LBB44_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB44_4
+; XTENSA-NEXT: .LBB44_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 24
+; XTENSA-NEXT: srai a8, a8, 24
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB44_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB44_1
+; XTENSA-NEXT: .LBB44_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: slli a12, a3, 24
+; XTENSA-ATOMIC-NEXT: srai a12, a12, 24
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB44_2
+; XTENSA-ATOMIC-NEXT: .LBB44_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB44_6
+; XTENSA-ATOMIC-NEXT: .LBB44_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: slli a6, a7, 24
+; XTENSA-ATOMIC-NEXT: srai a5, a6, 24
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a12, a5, .LBB44_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB44_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB44_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB44_1
+; XTENSA-ATOMIC-NEXT: .LBB44_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
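+; The unsigned umax/umin variants compare zero-extended bytes: the loaded
+; value is masked with 255 rather than sign-extended with slli/srai, and
+; the branches use bgeu/bltu.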
+define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a6, .LCPI45_0
+; XTENSA-NEXT: j .LBB45_2
+; XTENSA-NEXT: .LBB45_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB45_4
+; XTENSA-NEXT: .LBB45_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a4, a8, .LBB45_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB45_1
+; XTENSA-NEXT: .LBB45_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB45_2
+; XTENSA-ATOMIC-NEXT: .LBB45_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB45_6
+; XTENSA-ATOMIC-NEXT: .LBB45_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB45_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB45_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB45_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB45_1
+; XTENSA-ATOMIC-NEXT: .LBB45_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a6, .LCPI46_0
+; XTENSA-NEXT: j .LBB46_2
+; XTENSA-NEXT: .LBB46_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB46_4
+; XTENSA-NEXT: .LBB46_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a4, a8, .LBB46_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB46_1
+; XTENSA-NEXT: .LBB46_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB46_2
+; XTENSA-ATOMIC-NEXT: .LBB46_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB46_6
+; XTENSA-ATOMIC-NEXT: .LBB46_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB46_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB46_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB46_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB46_1
+; XTENSA-ATOMIC-NEXT: .LBB46_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a4, 255
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: and a8, a3, a4
+; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a3, .LCPI47_0
+; XTENSA-NEXT: j .LBB47_2
+; XTENSA-NEXT: .LBB47_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 8
+; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a3
+; XTENSA-NEXT: l8ui a2, a1, 8
+; XTENSA-NEXT: bnez a10, .LBB47_4
+; XTENSA-NEXT: .LBB47_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 8
+; XTENSA-NEXT: and a8, a2, a4
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bgeu a9, a8, .LBB47_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB47_1
+; XTENSA-NEXT: .LBB47_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB47_2
+; XTENSA-ATOMIC-NEXT: .LBB47_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB47_6
+; XTENSA-ATOMIC-NEXT: .LBB47_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB47_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB47_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB47_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB47_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB47_1
+; XTENSA-ATOMIC-NEXT: .LBB47_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a4, 255
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: and a8, a3, a4
+; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a3, .LCPI48_0
+; XTENSA-NEXT: j .LBB48_2
+; XTENSA-NEXT: .LBB48_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 8
+; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a3
+; XTENSA-NEXT: l8ui a2, a1, 8
+; XTENSA-NEXT: bnez a10, .LBB48_4
+; XTENSA-NEXT: .LBB48_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 8
+; XTENSA-NEXT: and a8, a2, a4
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bgeu a9, a8, .LBB48_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB48_1
+; XTENSA-NEXT: .LBB48_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB48_2
+; XTENSA-ATOMIC-NEXT: .LBB48_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB48_6
+; XTENSA-ATOMIC-NEXT: .LBB48_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB48_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB48_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB48_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB48_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB48_1
+; XTENSA-ATOMIC-NEXT: .LBB48_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a6, .LCPI49_0
+; XTENSA-NEXT: j .LBB49_2
+; XTENSA-NEXT: .LBB49_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB49_4
+; XTENSA-NEXT: .LBB49_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a4, a8, .LBB49_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB49_1
+; XTENSA-NEXT: .LBB49_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB49_2
+; XTENSA-ATOMIC-NEXT: .LBB49_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB49_6
+; XTENSA-ATOMIC-NEXT: .LBB49_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a12, a5, .LBB49_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB49_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB49_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB49_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB49_1
+; XTENSA-ATOMIC-NEXT: .LBB49_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
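+; umin mirrors umax: the same word-sized s32c1i loop on the aligned
+; containing word, with the unsigned comparison inverted (bltu instead of
+; bgeu) when choosing between the loaded byte and the operand.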
+define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i8_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a6, .LCPI50_0
+; XTENSA-NEXT: j .LBB50_2
+; XTENSA-NEXT: .LBB50_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB50_4
+; XTENSA-NEXT: .LBB50_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a4, a8, .LBB50_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB50_1
+; XTENSA-NEXT: .LBB50_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB50_2
+; XTENSA-ATOMIC-NEXT: .LBB50_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB50_6
+; XTENSA-ATOMIC-NEXT: .LBB50_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB50_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB50_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB50_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB50_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB50_1
+; XTENSA-ATOMIC-NEXT: .LBB50_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i8_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a6, .LCPI51_0
+; XTENSA-NEXT: j .LBB51_2
+; XTENSA-NEXT: .LBB51_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB51_4
+; XTENSA-NEXT: .LBB51_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a4, a8, .LBB51_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB51_1
+; XTENSA-NEXT: .LBB51_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB51_2
+; XTENSA-ATOMIC-NEXT: .LBB51_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB51_6
+; XTENSA-ATOMIC-NEXT: .LBB51_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB51_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB51_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB51_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB51_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB51_1
+; XTENSA-ATOMIC-NEXT: .LBB51_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
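+; release places the memw barrier before the initial load of the containing
+; word and emits none after the loop. In the libcall path the two movi values
+; (3 and 0) are the success and failure orderings passed to the
+; compare-exchange helper loaded from .LCPI52_0 (presumably
+; __atomic_compare_exchange_1).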
+define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i8_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a4, 255
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: and a8, a3, a4
+; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a3, .LCPI52_0
+; XTENSA-NEXT: j .LBB52_2
+; XTENSA-NEXT: .LBB52_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 8
+; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a3
+; XTENSA-NEXT: l8ui a2, a1, 8
+; XTENSA-NEXT: bnez a10, .LBB52_4
+; XTENSA-NEXT: .LBB52_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 8
+; XTENSA-NEXT: and a8, a2, a4
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bltu a9, a8, .LBB52_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB52_1
+; XTENSA-NEXT: .LBB52_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB52_2
+; XTENSA-ATOMIC-NEXT: .LBB52_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB52_6
+; XTENSA-ATOMIC-NEXT: .LBB52_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB52_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB52_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB52_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB52_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB52_1
+; XTENSA-ATOMIC-NEXT: .LBB52_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i8_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: s32i a2, a1, 4 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a4, 255
+; XTENSA-NEXT: or a5, a3, a3
+; XTENSA-NEXT: and a8, a3, a4
+; XTENSA-NEXT: s32i a8, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a3, .LCPI53_0
+; XTENSA-NEXT: j .LBB53_2
+; XTENSA-NEXT: .LBB53_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 8
+; XTENSA-NEXT: l32i a10, a1, 4 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a3
+; XTENSA-NEXT: l8ui a2, a1, 8
+; XTENSA-NEXT: bnez a10, .LBB53_4
+; XTENSA-NEXT: .LBB53_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 8
+; XTENSA-NEXT: and a8, a2, a4
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: l32i a9, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bltu a9, a8, .LBB53_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB53_1
+; XTENSA-NEXT: .LBB53_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB53_2
+; XTENSA-ATOMIC-NEXT: .LBB53_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB53_6
+; XTENSA-ATOMIC-NEXT: .LBB53_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB53_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB53_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB53_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB53_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB53_1
+; XTENSA-ATOMIC-NEXT: .LBB53_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i8_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a8, a3, a3
+; XTENSA-NEXT: s32i a2, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: l8ui a2, a2, 0
+; XTENSA-NEXT: movi a5, 255
+; XTENSA-NEXT: and a4, a8, a5
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a6, .LCPI54_0
+; XTENSA-NEXT: j .LBB54_2
+; XTENSA-NEXT: .LBB54_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: l32i a10, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: l8ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB54_4
+; XTENSA-NEXT: .LBB54_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s8i a2, a1, 4
+; XTENSA-NEXT: and a8, a2, a5
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a4, a8, .LBB54_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB54_1
+; XTENSA-NEXT: .LBB54_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i8_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: and a12, a3, a9
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB54_2
+; XTENSA-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB54_6
+; XTENSA-ATOMIC-NEXT: .LBB54_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a7, a15
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: or a6, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a12, a5, .LBB54_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a7, a7
+; XTENSA-ATOMIC-NEXT: .LBB54_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a6, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a7, a7
+; XTENSA-ATOMIC-NEXT: and a6, a15, a10
+; XTENSA-ATOMIC-NEXT: or a7, a6, a7
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a11, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB54_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB54_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB54_1
+; XTENSA-ATOMIC-NEXT: .LBB54_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
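+; The i16 tests follow the same containing-word CAS scheme as i8, but the
+; 0xffff lane mask does not fit a movi immediate, so it is loaded from the
+; literal pool with l32r. For xchg the new value is pre-shifted into the lane
+; before the loop; each iteration only clears the lane and ORs the value in.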
+define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI55_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI55_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB55_2
+; XTENSA-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB55_4
+; XTENSA-ATOMIC-NEXT: .LBB55_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB55_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB55_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB55_1
+; XTENSA-ATOMIC-NEXT: .LBB55_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI56_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI56_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB56_2
+; XTENSA-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB56_4
+; XTENSA-ATOMIC-NEXT: .LBB56_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB56_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB56_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB56_1
+; XTENSA-ATOMIC-NEXT: .LBB56_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI57_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI57_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB57_2
+; XTENSA-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB57_4
+; XTENSA-ATOMIC-NEXT: .LBB57_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB57_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB57_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB57_1
+; XTENSA-ATOMIC-NEXT: .LBB57_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI58_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI58_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB58_2
+; XTENSA-ATOMIC-NEXT: .LBB58_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB58_4
+; XTENSA-ATOMIC-NEXT: .LBB58_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB58_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB58_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB58_1
+; XTENSA-ATOMIC-NEXT: .LBB58_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI59_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI59_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a10, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a11, -4
+; XTENSA-ATOMIC-NEXT: and a11, a2, a11
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB59_2
+; XTENSA-ATOMIC-NEXT: .LBB59_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB59_4
+; XTENSA-ATOMIC-NEXT: .LBB59_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a15, a10
+; XTENSA-ATOMIC-NEXT: or a14, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a11, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB59_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB59_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB59_1
+; XTENSA-ATOMIC-NEXT: .LBB59_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
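+; add is performed on the whole word; the sum is re-masked to the 16-bit lane
+; before being merged with the untouched bytes and stored with s32c1i.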
+define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI60_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI60_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB60_2
+; XTENSA-ATOMIC-NEXT: .LBB60_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB60_4
+; XTENSA-ATOMIC-NEXT: .LBB60_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB60_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB60_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB60_1
+; XTENSA-ATOMIC-NEXT: .LBB60_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI61_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI61_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB61_2
+; XTENSA-ATOMIC-NEXT: .LBB61_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB61_4
+; XTENSA-ATOMIC-NEXT: .LBB61_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB61_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB61_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB61_1
+; XTENSA-ATOMIC-NEXT: .LBB61_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI62_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI62_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB62_2
+; XTENSA-ATOMIC-NEXT: .LBB62_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB62_4
+; XTENSA-ATOMIC-NEXT: .LBB62_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB62_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB62_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB62_1
+; XTENSA-ATOMIC-NEXT: .LBB62_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI63_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI63_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB63_2
+; XTENSA-ATOMIC-NEXT: .LBB63_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB63_4
+; XTENSA-ATOMIC-NEXT: .LBB63_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB63_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB63_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB63_1
+; XTENSA-ATOMIC-NEXT: .LBB63_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI64_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI64_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB64_2
+; XTENSA-ATOMIC-NEXT: .LBB64_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB64_4
+; XTENSA-ATOMIC-NEXT: .LBB64_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: add a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB64_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB64_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB64_1
+; XTENSA-ATOMIC-NEXT: .LBB64_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
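+; sub is identical to the add expansion except for the sub in the loop body.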
+define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI65_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI65_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB65_2
+; XTENSA-ATOMIC-NEXT: .LBB65_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB65_4
+; XTENSA-ATOMIC-NEXT: .LBB65_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB65_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB65_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB65_1
+; XTENSA-ATOMIC-NEXT: .LBB65_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI66_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI66_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB66_2
+; XTENSA-ATOMIC-NEXT: .LBB66_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB66_4
+; XTENSA-ATOMIC-NEXT: .LBB66_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB66_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB66_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB66_1
+; XTENSA-ATOMIC-NEXT: .LBB66_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI67_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI67_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB67_2
+; XTENSA-ATOMIC-NEXT: .LBB67_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB67_4
+; XTENSA-ATOMIC-NEXT: .LBB67_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB67_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB67_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB67_1
+; XTENSA-ATOMIC-NEXT: .LBB67_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI68_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI68_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB68_2
+; XTENSA-ATOMIC-NEXT: .LBB68_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB68_4
+; XTENSA-ATOMIC-NEXT: .LBB68_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB68_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB68_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB68_1
+; XTENSA-ATOMIC-NEXT: .LBB68_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI69_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI69_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a11, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -4
+; XTENSA-ATOMIC-NEXT: and a12, a2, a12
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 1
+; XTENSA-ATOMIC-NEXT: j .LBB69_2
+; XTENSA-ATOMIC-NEXT: .LBB69_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB69_4
+; XTENSA-ATOMIC-NEXT: .LBB69_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a11
+; XTENSA-ATOMIC-NEXT: sub a6, a15, a9
+; XTENSA-ATOMIC-NEXT: and a6, a6, a10
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a12, 0
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB69_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB69_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a13, a13
+; XTENSA-ATOMIC-NEXT: j .LBB69_1
+; XTENSA-ATOMIC-NEXT: .LBB69_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
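+; For and, the lane mask can be folded into the operand up front (operand OR
+; inverted lane mask), so the loop body needs only a single and before the
+; s32c1i, leaving the bits outside the lane unchanged.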
+define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI70_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI70_0
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB70_2
+; XTENSA-ATOMIC-NEXT: .LBB70_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB70_4
+; XTENSA-ATOMIC-NEXT: .LBB70_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB70_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB70_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB70_1
+; XTENSA-ATOMIC-NEXT: .LBB70_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI71_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI71_0
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB71_2
+; XTENSA-ATOMIC-NEXT: .LBB71_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB71_4
+; XTENSA-ATOMIC-NEXT: .LBB71_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB71_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB71_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB71_1
+; XTENSA-ATOMIC-NEXT: .LBB71_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI72_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI72_0
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB72_2
+; XTENSA-ATOMIC-NEXT: .LBB72_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB72_4
+; XTENSA-ATOMIC-NEXT: .LBB72_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB72_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB72_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB72_1
+; XTENSA-ATOMIC-NEXT: .LBB72_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI73_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI73_0
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB73_2
+; XTENSA-ATOMIC-NEXT: .LBB73_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB73_4
+; XTENSA-ATOMIC-NEXT: .LBB73_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB73_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB73_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB73_1
+; XTENSA-ATOMIC-NEXT: .LBB73_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI74_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI74_0
+; XTENSA-ATOMIC-NEXT: and a10, a3, a9
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a11
+; XTENSA-ATOMIC-NEXT: or a9, a10, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB74_2
+; XTENSA-ATOMIC-NEXT: .LBB74_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB74_4
+; XTENSA-ATOMIC-NEXT: .LBB74_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB74_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB74_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB74_1
+; XTENSA-ATOMIC-NEXT: .LBB74_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
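+; For nand the inlined loop cannot apply a single ALU op to the whole word:
+; it computes ~(old & b) on the shifted sub-word, masks it, and merges it
+; with the untouched bytes (old & ~mask) before the s32c1i attempt.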
+define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI75_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI75_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB75_2
+; XTENSA-ATOMIC-NEXT: .LBB75_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB75_4
+; XTENSA-ATOMIC-NEXT: .LBB75_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB75_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB75_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB75_1
+; XTENSA-ATOMIC-NEXT: .LBB75_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI76_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI76_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB76_2
+; XTENSA-ATOMIC-NEXT: .LBB76_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB76_4
+; XTENSA-ATOMIC-NEXT: .LBB76_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB76_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB76_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB76_1
+; XTENSA-ATOMIC-NEXT: .LBB76_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI77_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI77_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB77_2
+; XTENSA-ATOMIC-NEXT: .LBB77_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB77_4
+; XTENSA-ATOMIC-NEXT: .LBB77_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB77_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB77_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB77_1
+; XTENSA-ATOMIC-NEXT: .LBB77_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI78_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI78_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB78_2
+; XTENSA-ATOMIC-NEXT: .LBB78_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB78_4
+; XTENSA-ATOMIC-NEXT: .LBB78_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB78_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB78_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB78_1
+; XTENSA-ATOMIC-NEXT: .LBB78_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_nand_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI79_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_nand_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a10, .LCPI79_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a10
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a11, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a11
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a10
+; XTENSA-ATOMIC-NEXT: movi a11, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a10, a11
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: movi a15, 1
+; XTENSA-ATOMIC-NEXT: j .LBB79_2
+; XTENSA-ATOMIC-NEXT: .LBB79_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a6, a6
+; XTENSA-ATOMIC-NEXT: beqi a5, 1, .LBB79_4
+; XTENSA-ATOMIC-NEXT: .LBB79_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a6, a7, a12
+; XTENSA-ATOMIC-NEXT: and a5, a7, a9
+; XTENSA-ATOMIC-NEXT: xor a5, a5, a11
+; XTENSA-ATOMIC-NEXT: and a5, a5, a10
+; XTENSA-ATOMIC-NEXT: or a6, a6, a5
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a6, a13, 0
+; XTENSA-ATOMIC-NEXT: or a5, a15, a15
+; XTENSA-ATOMIC-NEXT: beq a6, a7, .LBB79_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB79_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a5, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB79_1
+; XTENSA-ATOMIC-NEXT: .LBB79_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a6
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw nand ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
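+; or only needs the shifted, pre-masked operand: or-ing it into the loaded
+; word cannot disturb the neighbouring bytes, so no inverted-mask merge is
+; required in the loop body.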
+define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI80_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI80_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB80_2
+; XTENSA-ATOMIC-NEXT: .LBB80_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB80_4
+; XTENSA-ATOMIC-NEXT: .LBB80_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB80_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB80_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB80_1
+; XTENSA-ATOMIC-NEXT: .LBB80_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI81_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI81_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB81_2
+; XTENSA-ATOMIC-NEXT: .LBB81_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB81_4
+; XTENSA-ATOMIC-NEXT: .LBB81_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB81_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB81_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB81_1
+; XTENSA-ATOMIC-NEXT: .LBB81_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI82_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI82_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB82_2
+; XTENSA-ATOMIC-NEXT: .LBB82_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB82_4
+; XTENSA-ATOMIC-NEXT: .LBB82_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB82_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB82_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB82_1
+; XTENSA-ATOMIC-NEXT: .LBB82_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI83_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI83_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB83_2
+; XTENSA-ATOMIC-NEXT: .LBB83_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB83_4
+; XTENSA-ATOMIC-NEXT: .LBB83_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB83_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB83_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB83_1
+; XTENSA-ATOMIC-NEXT: .LBB83_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI84_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI84_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB84_2
+; XTENSA-ATOMIC-NEXT: .LBB84_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB84_4
+; XTENSA-ATOMIC-NEXT: .LBB84_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB84_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB84_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB84_1
+; XTENSA-ATOMIC-NEXT: .LBB84_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
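+; xor behaves like or: the operand is masked and shifted once outside the
+; loop, and a single xor against the loaded word updates only the target
+; halfword.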
+define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI85_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI85_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB85_2
+; XTENSA-ATOMIC-NEXT: .LBB85_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB85_4
+; XTENSA-ATOMIC-NEXT: .LBB85_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB85_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB85_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB85_1
+; XTENSA-ATOMIC-NEXT: .LBB85_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI86_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI86_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB86_2
+; XTENSA-ATOMIC-NEXT: .LBB86_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB86_4
+; XTENSA-ATOMIC-NEXT: .LBB86_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB86_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB86_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB86_1
+; XTENSA-ATOMIC-NEXT: .LBB86_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI87_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI87_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB87_2
+; XTENSA-ATOMIC-NEXT: .LBB87_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB87_4
+; XTENSA-ATOMIC-NEXT: .LBB87_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB87_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB87_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB87_1
+; XTENSA-ATOMIC-NEXT: .LBB87_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI88_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI88_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB88_2
+; XTENSA-ATOMIC-NEXT: .LBB88_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB88_4
+; XTENSA-ATOMIC-NEXT: .LBB88_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB88_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB88_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB88_1
+; XTENSA-ATOMIC-NEXT: .LBB88_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI89_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32r a8, .LCPI89_0
+; XTENSA-ATOMIC-NEXT: and a9, a3, a8
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a10, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a10
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB89_2
+; XTENSA-ATOMIC-NEXT: .LBB89_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a13, a13
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB89_4
+; XTENSA-ATOMIC-NEXT: .LBB89_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a13, a14, a9
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a13, a14, .LBB89_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB89_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB89_1
+; XTENSA-ATOMIC-NEXT: .LBB89_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
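+; For the signed min/max operations the XTENSA configuration expands to a
+; loop around a compare-exchange-style libcall (address taken from the
+; constant pool), keeping the expected value in a stack slot; the
+; XTENSA-ATOMIC loop extracts the halfword, sign-extends it with
+; slli/srai by 16, selects old or b with a conditional branch, and
+; re-inserts the result before the s32c1i.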
+define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a4, .LCPI90_0
+; XTENSA-NEXT: j .LBB90_2
+; XTENSA-NEXT: .LBB90_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB90_4
+; XTENSA-NEXT: .LBB90_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB90_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB90_1
+; XTENSA-NEXT: .LBB90_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI90_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB90_2
+; XTENSA-ATOMIC-NEXT: .LBB90_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB90_6
+; XTENSA-ATOMIC-NEXT: .LBB90_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB90_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB90_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI90_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB90_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB90_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB90_1
+; XTENSA-ATOMIC-NEXT: .LBB90_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a4, .LCPI91_0
+; XTENSA-NEXT: j .LBB91_2
+; XTENSA-NEXT: .LBB91_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB91_4
+; XTENSA-NEXT: .LBB91_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB91_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB91_1
+; XTENSA-NEXT: .LBB91_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI91_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB91_2
+; XTENSA-ATOMIC-NEXT: .LBB91_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB91_6
+; XTENSA-ATOMIC-NEXT: .LBB91_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB91_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB91_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI91_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB91_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB91_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB91_1
+; XTENSA-ATOMIC-NEXT: .LBB91_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l16ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 16
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a5, .LCPI92_0
+; XTENSA-NEXT: j .LBB92_2
+; XTENSA-NEXT: .LBB92_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB92_4
+; XTENSA-NEXT: .LBB92_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bge a4, a8, .LBB92_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB92_1
+; XTENSA-NEXT: .LBB92_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI92_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB92_2
+; XTENSA-ATOMIC-NEXT: .LBB92_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB92_6
+; XTENSA-ATOMIC-NEXT: .LBB92_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB92_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB92_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI92_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB92_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB92_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB92_1
+; XTENSA-ATOMIC-NEXT: .LBB92_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l16ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 16
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a5, .LCPI93_0
+; XTENSA-NEXT: j .LBB93_2
+; XTENSA-NEXT: .LBB93_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB93_4
+; XTENSA-NEXT: .LBB93_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: bge a4, a8, .LBB93_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB93_1
+; XTENSA-NEXT: .LBB93_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI93_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB93_2
+; XTENSA-ATOMIC-NEXT: .LBB93_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB93_6
+; XTENSA-ATOMIC-NEXT: .LBB93_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB93_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB93_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI93_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB93_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB93_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB93_1
+; XTENSA-ATOMIC-NEXT: .LBB93_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI94_0
+; XTENSA-NEXT: j .LBB94_2
+; XTENSA-NEXT: .LBB94_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB94_4
+; XTENSA-NEXT: .LBB94_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a5, a8, .LBB94_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB94_1
+; XTENSA-NEXT: .LBB94_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI94_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB94_2
+; XTENSA-ATOMIC-NEXT: .LBB94_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB94_6
+; XTENSA-ATOMIC-NEXT: .LBB94_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a11, a6, .LBB94_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB94_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI94_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB94_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB94_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB94_1
+; XTENSA-ATOMIC-NEXT: .LBB94_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
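+; min mirrors max with the branch condition inverted (blt instead of bge),
+; so the loaded value is kept when it is already the smaller one.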
+
+define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a4, .LCPI95_0
+; XTENSA-NEXT: j .LBB95_2
+; XTENSA-NEXT: .LBB95_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB95_4
+; XTENSA-NEXT: .LBB95_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB95_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB95_1
+; XTENSA-NEXT: .LBB95_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI95_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB95_2
+; XTENSA-ATOMIC-NEXT: .LBB95_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB95_6
+; XTENSA-ATOMIC-NEXT: .LBB95_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB95_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB95_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI95_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB95_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB95_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB95_1
+; XTENSA-ATOMIC-NEXT: .LBB95_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a4, .LCPI96_0
+; XTENSA-NEXT: j .LBB96_2
+; XTENSA-NEXT: .LBB96_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB96_4
+; XTENSA-NEXT: .LBB96_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB96_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB96_1
+; XTENSA-NEXT: .LBB96_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI96_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB96_2
+; XTENSA-ATOMIC-NEXT: .LBB96_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB96_6
+; XTENSA-ATOMIC-NEXT: .LBB96_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB96_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB96_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI96_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB96_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB96_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB96_1
+; XTENSA-ATOMIC-NEXT: .LBB96_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l16ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 16
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a5, .LCPI97_0
+; XTENSA-NEXT: j .LBB97_2
+; XTENSA-NEXT: .LBB97_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB97_4
+; XTENSA-NEXT: .LBB97_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: blt a4, a8, .LBB97_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB97_1
+; XTENSA-NEXT: .LBB97_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI97_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB97_2
+; XTENSA-ATOMIC-NEXT: .LBB97_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB97_6
+; XTENSA-ATOMIC-NEXT: .LBB97_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB97_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB97_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI97_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB97_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB97_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB97_1
+; XTENSA-ATOMIC-NEXT: .LBB97_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a9, a2, a2
+; XTENSA-NEXT: l16ui a2, a9, 0
+; XTENSA-NEXT: s32i a3, a1, 0 # 4-byte Folded Spill
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: or a3, a9, a9
+; XTENSA-NEXT: srai a4, a8, 16
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a5, .LCPI98_0
+; XTENSA-NEXT: j .LBB98_2
+; XTENSA-NEXT: .LBB98_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 4
+; XTENSA-NEXT: or a10, a3, a3
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 4
+; XTENSA-NEXT: bnez a10, .LBB98_4
+; XTENSA-NEXT: .LBB98_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 4
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: l32i a12, a1, 0 # 4-byte Folded Reload
+; XTENSA-NEXT: blt a4, a8, .LBB98_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB98_1
+; XTENSA-NEXT: .LBB98_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI98_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB98_2
+; XTENSA-ATOMIC-NEXT: .LBB98_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB98_6
+; XTENSA-ATOMIC-NEXT: .LBB98_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB98_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB98_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI98_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB98_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB98_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB98_1
+; XTENSA-ATOMIC-NEXT: .LBB98_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: slli a8, a3, 16
+; XTENSA-NEXT: srai a5, a8, 16
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI99_0
+; XTENSA-NEXT: j .LBB99_2
+; XTENSA-NEXT: .LBB99_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB99_4
+; XTENSA-NEXT: .LBB99_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: slli a8, a2, 16
+; XTENSA-NEXT: srai a8, a8, 16
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a5, a8, .LBB99_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB99_1
+; XTENSA-NEXT: .LBB99_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI99_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: slli a11, a3, 16
+; XTENSA-ATOMIC-NEXT: srai a11, a11, 16
+; XTENSA-ATOMIC-NEXT: movi a12, 0
+; XTENSA-ATOMIC-NEXT: movi a13, 1
+; XTENSA-ATOMIC-NEXT: j .LBB99_2
+; XTENSA-ATOMIC-NEXT: .LBB99_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a15, a15
+; XTENSA-ATOMIC-NEXT: beqi a7, 1, .LBB99_6
+; XTENSA-ATOMIC-NEXT: .LBB99_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a14
+; XTENSA-ATOMIC-NEXT: slli a7, a15, 16
+; XTENSA-ATOMIC-NEXT: srai a6, a7, 16
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a11, a6, .LBB99_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB99_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a15, .LCPI99_0
+; XTENSA-ATOMIC-NEXT: and a15, a7, a15
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a15, a15
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: or a15, a7, a15
+; XTENSA-ATOMIC-NEXT: wsr a14, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a15, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: beq a15, a14, .LBB99_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB99_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB99_1
+; XTENSA-ATOMIC-NEXT: .LBB99_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a15
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI100_1
+; XTENSA-NEXT: j .LBB100_2
+; XTENSA-NEXT: .LBB100_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB100_4
+; XTENSA-NEXT: .LBB100_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI100_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a9, a8, .LBB100_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB100_1
+; XTENSA-NEXT: .LBB100_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI100_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB100_2
+; XTENSA-ATOMIC-NEXT: .LBB100_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB100_6
+; XTENSA-ATOMIC-NEXT: .LBB100_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI100_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB100_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB100_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB100_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB100_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB100_1
+; XTENSA-ATOMIC-NEXT: .LBB100_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI101_1
+; XTENSA-NEXT: j .LBB101_2
+; XTENSA-NEXT: .LBB101_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB101_4
+; XTENSA-NEXT: .LBB101_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI101_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a9, a8, .LBB101_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB101_1
+; XTENSA-NEXT: .LBB101_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI101_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB101_2
+; XTENSA-ATOMIC-NEXT: .LBB101_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB101_6
+; XTENSA-ATOMIC-NEXT: .LBB101_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI101_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB101_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB101_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB101_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB101_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB101_1
+; XTENSA-ATOMIC-NEXT: .LBB101_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l16ui a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI102_1
+; XTENSA-NEXT: j .LBB102_2
+; XTENSA-NEXT: .LBB102_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB102_4
+; XTENSA-NEXT: .LBB102_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI102_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a9, a8, .LBB102_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB102_1
+; XTENSA-NEXT: .LBB102_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI102_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB102_2
+; XTENSA-ATOMIC-NEXT: .LBB102_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB102_6
+; XTENSA-ATOMIC-NEXT: .LBB102_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI102_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB102_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB102_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB102_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB102_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB102_1
+; XTENSA-ATOMIC-NEXT: .LBB102_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l16ui a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI103_1
+; XTENSA-NEXT: j .LBB103_2
+; XTENSA-NEXT: .LBB103_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB103_4
+; XTENSA-NEXT: .LBB103_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI103_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a9, a8, .LBB103_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB103_1
+; XTENSA-NEXT: .LBB103_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI103_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB103_2
+; XTENSA-ATOMIC-NEXT: .LBB103_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB103_6
+; XTENSA-ATOMIC-NEXT: .LBB103_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI103_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB103_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB103_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB103_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB103_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB103_1
+; XTENSA-ATOMIC-NEXT: .LBB103_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI104_1
+; XTENSA-NEXT: j .LBB104_2
+; XTENSA-NEXT: .LBB104_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB104_4
+; XTENSA-NEXT: .LBB104_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI104_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a9, a8, .LBB104_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB104_1
+; XTENSA-NEXT: .LBB104_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI104_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB104_2
+; XTENSA-ATOMIC-NEXT: .LBB104_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB104_6
+; XTENSA-ATOMIC-NEXT: .LBB104_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI104_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a6, a5, .LBB104_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB104_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB104_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB104_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB104_1
+; XTENSA-ATOMIC-NEXT: .LBB104_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i16_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI105_1
+; XTENSA-NEXT: j .LBB105_2
+; XTENSA-NEXT: .LBB105_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB105_4
+; XTENSA-NEXT: .LBB105_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI105_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a9, a8, .LBB105_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB105_1
+; XTENSA-NEXT: .LBB105_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI105_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB105_2
+; XTENSA-ATOMIC-NEXT: .LBB105_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB105_6
+; XTENSA-ATOMIC-NEXT: .LBB105_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI105_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB105_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB105_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB105_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB105_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB105_1
+; XTENSA-ATOMIC-NEXT: .LBB105_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i16_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI106_1
+; XTENSA-NEXT: j .LBB106_2
+; XTENSA-NEXT: .LBB106_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB106_4
+; XTENSA-NEXT: .LBB106_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI106_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a9, a8, .LBB106_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB106_1
+; XTENSA-NEXT: .LBB106_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI106_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB106_2
+; XTENSA-ATOMIC-NEXT: .LBB106_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB106_6
+; XTENSA-ATOMIC-NEXT: .LBB106_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI106_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB106_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB106_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB106_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB106_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB106_1
+; XTENSA-ATOMIC-NEXT: .LBB106_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i16_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l16ui a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI107_1
+; XTENSA-NEXT: j .LBB107_2
+; XTENSA-NEXT: .LBB107_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB107_4
+; XTENSA-NEXT: .LBB107_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI107_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a9, a8, .LBB107_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB107_1
+; XTENSA-NEXT: .LBB107_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI107_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB107_2
+; XTENSA-ATOMIC-NEXT: .LBB107_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB107_6
+; XTENSA-ATOMIC-NEXT: .LBB107_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI107_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB107_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB107_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB107_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB107_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB107_1
+; XTENSA-ATOMIC-NEXT: .LBB107_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i16_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l16ui a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI108_1
+; XTENSA-NEXT: j .LBB108_2
+; XTENSA-NEXT: .LBB108_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB108_4
+; XTENSA-NEXT: .LBB108_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI108_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a9, a8, .LBB108_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB108_1
+; XTENSA-NEXT: .LBB108_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI108_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB108_2
+; XTENSA-ATOMIC-NEXT: .LBB108_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB108_6
+; XTENSA-ATOMIC-NEXT: .LBB108_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI108_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB108_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB108_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB108_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB108_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB108_1
+; XTENSA-ATOMIC-NEXT: .LBB108_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i16_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l16ui a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI109_1
+; XTENSA-NEXT: j .LBB109_2
+; XTENSA-NEXT: .LBB109_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB109_4
+; XTENSA-NEXT: .LBB109_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: l32r a8, .LCPI109_0
+; XTENSA-NEXT: and a9, a3, a8
+; XTENSA-NEXT: s16i a2, a1, 0
+; XTENSA-NEXT: and a8, a2, a8
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a9, a8, .LBB109_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB109_1
+; XTENSA-NEXT: .LBB109_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i16_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI109_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a10, 0
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB109_2
+; XTENSA-ATOMIC-NEXT: .LBB109_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a14, a14
+; XTENSA-ATOMIC-NEXT: beqi a15, 1, .LBB109_6
+; XTENSA-ATOMIC-NEXT: .LBB109_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a14, .LCPI109_0
+; XTENSA-ATOMIC-NEXT: and a6, a3, a14
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a15, a13
+; XTENSA-ATOMIC-NEXT: and a5, a15, a14
+; XTENSA-ATOMIC-NEXT: or a7, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a6, a5, .LBB109_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a15, a15
+; XTENSA-ATOMIC-NEXT: .LBB109_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-ATOMIC-NEXT: and a14, a7, a14
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a14, a14
+; XTENSA-ATOMIC-NEXT: and a15, a13, a9
+; XTENSA-ATOMIC-NEXT: or a14, a15, a14
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a15, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a14, a13, .LBB109_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB109_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB109_1
+; XTENSA-ATOMIC-NEXT: .LBB109_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
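+; The remaining tests operate on i32, the native width of s32c1i, so the
+; XTENSA-ATOMIC lowering drops the subword shift/mask handling and the CAS
+; loop reduces to wsr scompare1 / s32c1i / beq. Note how the memw barrier
+; placement tracks the ordering throughout: acquire emits memw after the loop,
+; release emits it before the initial load, acq_rel and seq_cst emit both, and
+; monotonic emits none. A simplified word-width sketch of xchg, using the same
+; hypothetical cas() helper as above (the generated code reuses the value
+; observed by s32c1i rather than reloading):
+;
+;   uint32_t old;
+;   do {
+;     old = *a;                  // current word
+;   } while (!cas(a, old, b));   // wsr scompare1; s32c1i; retry on mismatch
+;   return old;                  // atomicrmw returns the previous value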
+define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI110_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB110_2
+; XTENSA-ATOMIC-NEXT: .LBB110_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB110_4
+; XTENSA-ATOMIC-NEXT: .LBB110_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB110_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB110_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB110_1
+; XTENSA-ATOMIC-NEXT: .LBB110_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI111_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB111_2
+; XTENSA-ATOMIC-NEXT: .LBB111_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB111_4
+; XTENSA-ATOMIC-NEXT: .LBB111_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB111_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB111_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB111_1
+; XTENSA-ATOMIC-NEXT: .LBB111_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI112_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB112_2
+; XTENSA-ATOMIC-NEXT: .LBB112_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB112_4
+; XTENSA-ATOMIC-NEXT: .LBB112_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB112_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB112_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB112_1
+; XTENSA-ATOMIC-NEXT: .LBB112_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI113_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB113_2
+; XTENSA-ATOMIC-NEXT: .LBB113_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB113_4
+; XTENSA-ATOMIC-NEXT: .LBB113_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB113_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB113_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB113_1
+; XTENSA-ATOMIC-NEXT: .LBB113_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xchg_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI114_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xchg_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB114_2
+; XTENSA-ATOMIC-NEXT: .LBB114_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB114_4
+; XTENSA-ATOMIC-NEXT: .LBB114_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB114_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB114_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB114_1
+; XTENSA-ATOMIC-NEXT: .LBB114_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xchg ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
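+; atomicrmw add reuses the same word-width CAS skeleton as xchg; the only
+; difference is that the desired value is computed inside the loop
+; (add a8, a11, a3) instead of being taken directly from the operand. In the
+; sketch notation used above:
+;
+;   do { old = *a; } while (!cas(a, old, old + b));   // fold the add into the loop
+;   return old;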
+define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI115_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB115_2
+; XTENSA-ATOMIC-NEXT: .LBB115_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB115_4
+; XTENSA-ATOMIC-NEXT: .LBB115_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: add a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB115_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB115_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB115_1
+; XTENSA-ATOMIC-NEXT: .LBB115_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI116_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB116_2
+; XTENSA-ATOMIC-NEXT: .LBB116_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB116_4
+; XTENSA-ATOMIC-NEXT: .LBB116_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: add a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB116_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB116_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB116_1
+; XTENSA-ATOMIC-NEXT: .LBB116_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI117_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB117_2
+; XTENSA-ATOMIC-NEXT: .LBB117_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB117_4
+; XTENSA-ATOMIC-NEXT: .LBB117_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: add a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB117_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB117_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB117_1
+; XTENSA-ATOMIC-NEXT: .LBB117_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI118_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB118_2
+; XTENSA-ATOMIC-NEXT: .LBB118_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB118_4
+; XTENSA-ATOMIC-NEXT: .LBB118_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: add a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB118_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB118_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB118_1
+; XTENSA-ATOMIC-NEXT: .LBB118_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_add_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI119_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_add_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB119_2
+; XTENSA-ATOMIC-NEXT: .LBB119_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB119_4
+; XTENSA-ATOMIC-NEXT: .LBB119_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: add a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB119_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB119_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB119_1
+; XTENSA-ATOMIC-NEXT: .LBB119_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw add ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
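+; The sub tests repeat the add pattern: the XTENSA runs call through a
+; constant-pool-loaded address (presumably an __atomic_fetch_sub libcall; the
+; symbol itself is not visible in this hunk), and the XTENSA-ATOMIC runs use
+; the same S32C1I compare-and-swap loop with SUB as the update step.
+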
+define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI120_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB120_2
+; XTENSA-ATOMIC-NEXT: .LBB120_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB120_4
+; XTENSA-ATOMIC-NEXT: .LBB120_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: sub a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB120_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB120_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB120_1
+; XTENSA-ATOMIC-NEXT: .LBB120_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI121_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB121_2
+; XTENSA-ATOMIC-NEXT: .LBB121_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB121_4
+; XTENSA-ATOMIC-NEXT: .LBB121_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: sub a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB121_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB121_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB121_1
+; XTENSA-ATOMIC-NEXT: .LBB121_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI122_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB122_2
+; XTENSA-ATOMIC-NEXT: .LBB122_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB122_4
+; XTENSA-ATOMIC-NEXT: .LBB122_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: sub a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB122_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB122_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB122_1
+; XTENSA-ATOMIC-NEXT: .LBB122_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI123_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB123_2
+; XTENSA-ATOMIC-NEXT: .LBB123_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB123_4
+; XTENSA-ATOMIC-NEXT: .LBB123_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: sub a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB123_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB123_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB123_1
+; XTENSA-ATOMIC-NEXT: .LBB123_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_sub_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI124_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_sub_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB124_2
+; XTENSA-ATOMIC-NEXT: .LBB124_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB124_4
+; XTENSA-ATOMIC-NEXT: .LBB124_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: sub a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB124_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB124_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB124_1
+; XTENSA-ATOMIC-NEXT: .LBB124_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw sub ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
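+; The bitwise groups (and, or, xor) below differ from add/sub only in the
+; update instruction inside the S32C1I loop (AND/OR/XOR) and in the libcall
+; selected by the constant-pool entry in the XTENSA runs.
+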
+define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI125_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB125_2
+; XTENSA-ATOMIC-NEXT: .LBB125_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB125_4
+; XTENSA-ATOMIC-NEXT: .LBB125_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB125_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB125_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB125_1
+; XTENSA-ATOMIC-NEXT: .LBB125_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI126_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB126_2
+; XTENSA-ATOMIC-NEXT: .LBB126_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB126_4
+; XTENSA-ATOMIC-NEXT: .LBB126_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB126_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB126_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB126_1
+; XTENSA-ATOMIC-NEXT: .LBB126_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI127_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB127_2
+; XTENSA-ATOMIC-NEXT: .LBB127_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB127_4
+; XTENSA-ATOMIC-NEXT: .LBB127_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB127_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB127_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB127_1
+; XTENSA-ATOMIC-NEXT: .LBB127_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI128_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB128_2
+; XTENSA-ATOMIC-NEXT: .LBB128_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB128_4
+; XTENSA-ATOMIC-NEXT: .LBB128_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB128_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB128_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB128_1
+; XTENSA-ATOMIC-NEXT: .LBB128_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_and_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI129_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_and_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB129_2
+; XTENSA-ATOMIC-NEXT: .LBB129_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB129_4
+; XTENSA-ATOMIC-NEXT: .LBB129_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB129_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB129_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB129_1
+; XTENSA-ATOMIC-NEXT: .LBB129_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw and ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
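+; The nand tests are left disabled, presumably because atomicrmw nand
+; lowering is not yet handled for this configuration (an assumption, not
+; verified here).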
+;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b monotonic
+; ret i32 %res
+;}
+;
+;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b acquire
+; ret i32 %res
+;}
+;
+;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b release
+; ret i32 %res
+;}
+;
+;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b acq_rel
+; ret i32 %res
+;}
+;
+;define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b seq_cst
+; ret i32 %res
+;}
+
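+; Note on readability: most `or aX, aY, aY` instructions in these checks are
+; Xtensa's register-move idiom. In the or group the actual atomic update is
+; the three-operand form, e.g. `or a8, a11, a3`.
+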
+define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI130_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB130_2
+; XTENSA-ATOMIC-NEXT: .LBB130_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB130_4
+; XTENSA-ATOMIC-NEXT: .LBB130_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB130_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB130_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB130_1
+; XTENSA-ATOMIC-NEXT: .LBB130_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI131_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB131_2
+; XTENSA-ATOMIC-NEXT: .LBB131_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB131_4
+; XTENSA-ATOMIC-NEXT: .LBB131_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB131_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB131_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB131_1
+; XTENSA-ATOMIC-NEXT: .LBB131_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI132_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB132_2
+; XTENSA-ATOMIC-NEXT: .LBB132_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB132_4
+; XTENSA-ATOMIC-NEXT: .LBB132_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB132_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB132_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB132_1
+; XTENSA-ATOMIC-NEXT: .LBB132_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI133_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB133_2
+; XTENSA-ATOMIC-NEXT: .LBB133_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB133_4
+; XTENSA-ATOMIC-NEXT: .LBB133_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB133_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB133_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB133_1
+; XTENSA-ATOMIC-NEXT: .LBB133_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_or_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI134_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_or_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB134_2
+; XTENSA-ATOMIC-NEXT: .LBB134_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB134_4
+; XTENSA-ATOMIC-NEXT: .LBB134_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB134_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB134_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB134_1
+; XTENSA-ATOMIC-NEXT: .LBB134_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw or ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI135_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB135_2
+; XTENSA-ATOMIC-NEXT: .LBB135_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB135_4
+; XTENSA-ATOMIC-NEXT: .LBB135_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB135_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB135_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB135_1
+; XTENSA-ATOMIC-NEXT: .LBB135_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 2
+; XTENSA-NEXT: l32r a8, .LCPI136_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB136_2
+; XTENSA-ATOMIC-NEXT: .LBB136_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB136_4
+; XTENSA-ATOMIC-NEXT: .LBB136_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB136_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB136_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB136_1
+; XTENSA-ATOMIC-NEXT: .LBB136_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI137_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB137_2
+; XTENSA-ATOMIC-NEXT: .LBB137_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB137_4
+; XTENSA-ATOMIC-NEXT: .LBB137_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB137_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB137_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB137_1
+; XTENSA-ATOMIC-NEXT: .LBB137_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 4
+; XTENSA-NEXT: l32r a8, .LCPI138_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB138_2
+; XTENSA-ATOMIC-NEXT: .LBB138_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB138_4
+; XTENSA-ATOMIC-NEXT: .LBB138_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB138_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB138_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB138_1
+; XTENSA-ATOMIC-NEXT: .LBB138_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_xor_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a11, a3, a3
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI139_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_xor_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB139_2
+; XTENSA-ATOMIC-NEXT: .LBB139_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB139_4
+; XTENSA-ATOMIC-NEXT: .LBB139_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a3
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB139_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB139_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB139_1
+; XTENSA-ATOMIC-NEXT: .LBB139_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw xor ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
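+; There is no fetch-max libcall to target, so for max (and min below) the
+; XTENSA runs expand to a compare-exchange loop: the expected value lives in
+; a stack slot (hence the larger frame, entry a1, 48), a BGE branch selects
+; the new value, and the CAS itself is a constant-pool-loaded call
+; (presumably __atomic_compare_exchange_4). The a13/a14 arguments carry the
+; success/failure orderings; release passes failure ordering 0 and acq_rel
+; passes 2, as cmpxchg failure-ordering rules require. The XTENSA-ATOMIC
+; runs keep the compare-select inside the S32C1I loop instead.
+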
+define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI140_0
+; XTENSA-NEXT: j .LBB140_2
+; XTENSA-NEXT: .LBB140_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB140_4
+; XTENSA-NEXT: .LBB140_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a3, a2, .LBB140_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB140_1
+; XTENSA-NEXT: .LBB140_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB140_2
+; XTENSA-ATOMIC-NEXT: .LBB140_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB140_6
+; XTENSA-ATOMIC-NEXT: .LBB140_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB140_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB140_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB140_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB140_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB140_1
+; XTENSA-ATOMIC-NEXT: .LBB140_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI141_0
+; XTENSA-NEXT: j .LBB141_2
+; XTENSA-NEXT: .LBB141_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB141_4
+; XTENSA-NEXT: .LBB141_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a3, a2, .LBB141_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB141_1
+; XTENSA-NEXT: .LBB141_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB141_2
+; XTENSA-ATOMIC-NEXT: .LBB141_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB141_6
+; XTENSA-ATOMIC-NEXT: .LBB141_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB141_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB141_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB141_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB141_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB141_1
+; XTENSA-ATOMIC-NEXT: .LBB141_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI142_0
+; XTENSA-NEXT: j .LBB142_2
+; XTENSA-NEXT: .LBB142_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB142_4
+; XTENSA-NEXT: .LBB142_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a3, a2, .LBB142_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB142_1
+; XTENSA-NEXT: .LBB142_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB142_2
+; XTENSA-ATOMIC-NEXT: .LBB142_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB142_6
+; XTENSA-ATOMIC-NEXT: .LBB142_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB142_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB142_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB142_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB142_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB142_1
+; XTENSA-ATOMIC-NEXT: .LBB142_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI143_0
+; XTENSA-NEXT: j .LBB143_2
+; XTENSA-NEXT: .LBB143_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB143_4
+; XTENSA-NEXT: .LBB143_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a3, a2, .LBB143_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB143_1
+; XTENSA-NEXT: .LBB143_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB143_2
+; XTENSA-ATOMIC-NEXT: .LBB143_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB143_6
+; XTENSA-ATOMIC-NEXT: .LBB143_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB143_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB143_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB143_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB143_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB143_1
+; XTENSA-ATOMIC-NEXT: .LBB143_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_max_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI144_0
+; XTENSA-NEXT: j .LBB144_2
+; XTENSA-NEXT: .LBB144_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB144_4
+; XTENSA-NEXT: .LBB144_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bge a3, a2, .LBB144_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB144_1
+; XTENSA-NEXT: .LBB144_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_max_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB144_2
+; XTENSA-ATOMIC-NEXT: .LBB144_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB144_6
+; XTENSA-ATOMIC-NEXT: .LBB144_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bge a3, a11, .LBB144_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB144_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB144_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB144_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB144_1
+; XTENSA-ATOMIC-NEXT: .LBB144_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw max ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
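+; The min tests mirror max with the comparison inverted (BLT instead of BGE).
+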
+define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI145_0
+; XTENSA-NEXT: j .LBB145_2
+; XTENSA-NEXT: .LBB145_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB145_4
+; XTENSA-NEXT: .LBB145_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a3, a2, .LBB145_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB145_1
+; XTENSA-NEXT: .LBB145_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB145_2
+; XTENSA-ATOMIC-NEXT: .LBB145_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB145_6
+; XTENSA-ATOMIC-NEXT: .LBB145_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB145_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB145_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB145_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB145_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB145_1
+; XTENSA-ATOMIC-NEXT: .LBB145_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI146_0
+; XTENSA-NEXT: j .LBB146_2
+; XTENSA-NEXT: .LBB146_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB146_4
+; XTENSA-NEXT: .LBB146_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a3, a2, .LBB146_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB146_1
+; XTENSA-NEXT: .LBB146_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB146_2
+; XTENSA-ATOMIC-NEXT: .LBB146_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB146_6
+; XTENSA-ATOMIC-NEXT: .LBB146_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB146_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB146_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB146_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB146_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB146_1
+; XTENSA-ATOMIC-NEXT: .LBB146_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI147_0
+; XTENSA-NEXT: j .LBB147_2
+; XTENSA-NEXT: .LBB147_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB147_4
+; XTENSA-NEXT: .LBB147_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a3, a2, .LBB147_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB147_1
+; XTENSA-NEXT: .LBB147_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB147_2
+; XTENSA-ATOMIC-NEXT: .LBB147_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB147_6
+; XTENSA-ATOMIC-NEXT: .LBB147_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB147_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB147_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB147_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB147_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB147_1
+; XTENSA-ATOMIC-NEXT: .LBB147_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI148_0
+; XTENSA-NEXT: j .LBB148_2
+; XTENSA-NEXT: .LBB148_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB148_4
+; XTENSA-NEXT: .LBB148_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a3, a2, .LBB148_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB148_1
+; XTENSA-NEXT: .LBB148_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB148_2
+; XTENSA-ATOMIC-NEXT: .LBB148_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB148_6
+; XTENSA-ATOMIC-NEXT: .LBB148_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB148_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB148_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB148_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB148_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB148_1
+; XTENSA-ATOMIC-NEXT: .LBB148_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_min_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI149_0
+; XTENSA-NEXT: j .LBB149_2
+; XTENSA-NEXT: .LBB149_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB149_4
+; XTENSA-NEXT: .LBB149_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: blt a3, a2, .LBB149_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB149_1
+; XTENSA-NEXT: .LBB149_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_min_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB149_2
+; XTENSA-ATOMIC-NEXT: .LBB149_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB149_6
+; XTENSA-ATOMIC-NEXT: .LBB149_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: blt a3, a11, .LBB149_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB149_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB149_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB149_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB149_1
+; XTENSA-ATOMIC-NEXT: .LBB149_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw min ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI150_0
+; XTENSA-NEXT: j .LBB150_2
+; XTENSA-NEXT: .LBB150_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB150_4
+; XTENSA-NEXT: .LBB150_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a3, a2, .LBB150_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB150_1
+; XTENSA-NEXT: .LBB150_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB150_2
+; XTENSA-ATOMIC-NEXT: .LBB150_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB150_6
+; XTENSA-ATOMIC-NEXT: .LBB150_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB150_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB150_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB150_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB150_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB150_1
+; XTENSA-ATOMIC-NEXT: .LBB150_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI151_0
+; XTENSA-NEXT: j .LBB151_2
+; XTENSA-NEXT: .LBB151_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB151_4
+; XTENSA-NEXT: .LBB151_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a3, a2, .LBB151_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB151_1
+; XTENSA-NEXT: .LBB151_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB151_2
+; XTENSA-ATOMIC-NEXT: .LBB151_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB151_6
+; XTENSA-ATOMIC-NEXT: .LBB151_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB151_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB151_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB151_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB151_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB151_1
+; XTENSA-ATOMIC-NEXT: .LBB151_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI152_0
+; XTENSA-NEXT: j .LBB152_2
+; XTENSA-NEXT: .LBB152_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB152_4
+; XTENSA-NEXT: .LBB152_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a3, a2, .LBB152_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB152_1
+; XTENSA-NEXT: .LBB152_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB152_2
+; XTENSA-ATOMIC-NEXT: .LBB152_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB152_6
+; XTENSA-ATOMIC-NEXT: .LBB152_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB152_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB152_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB152_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB152_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB152_1
+; XTENSA-ATOMIC-NEXT: .LBB152_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI153_0
+; XTENSA-NEXT: j .LBB153_2
+; XTENSA-NEXT: .LBB153_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB153_4
+; XTENSA-NEXT: .LBB153_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a3, a2, .LBB153_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB153_1
+; XTENSA-NEXT: .LBB153_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB153_2
+; XTENSA-ATOMIC-NEXT: .LBB153_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB153_6
+; XTENSA-ATOMIC-NEXT: .LBB153_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB153_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB153_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB153_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB153_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB153_1
+; XTENSA-ATOMIC-NEXT: .LBB153_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umax_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI154_0
+; XTENSA-NEXT: j .LBB154_2
+; XTENSA-NEXT: .LBB154_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB154_4
+; XTENSA-NEXT: .LBB154_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bgeu a3, a2, .LBB154_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB154_1
+; XTENSA-NEXT: .LBB154_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umax_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB154_2
+; XTENSA-ATOMIC-NEXT: .LBB154_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB154_6
+; XTENSA-ATOMIC-NEXT: .LBB154_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bgeu a3, a11, .LBB154_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB154_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB154_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB154_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB154_1
+; XTENSA-ATOMIC-NEXT: .LBB154_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umax ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 0
+; XTENSA-NEXT: l32r a5, .LCPI155_0
+; XTENSA-NEXT: j .LBB155_2
+; XTENSA-NEXT: .LBB155_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB155_4
+; XTENSA-NEXT: .LBB155_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a3, a2, .LBB155_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB155_1
+; XTENSA-NEXT: .LBB155_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB155_2
+; XTENSA-ATOMIC-NEXT: .LBB155_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB155_6
+; XTENSA-ATOMIC-NEXT: .LBB155_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB155_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB155_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB155_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB155_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB155_1
+; XTENSA-ATOMIC-NEXT: .LBB155_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 2
+; XTENSA-NEXT: l32r a5, .LCPI156_0
+; XTENSA-NEXT: j .LBB156_2
+; XTENSA-NEXT: .LBB156_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB156_4
+; XTENSA-NEXT: .LBB156_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a3, a2, .LBB156_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB156_1
+; XTENSA-NEXT: .LBB156_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB156_2
+; XTENSA-ATOMIC-NEXT: .LBB156_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB156_6
+; XTENSA-ATOMIC-NEXT: .LBB156_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB156_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB156_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB156_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB156_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB156_1
+; XTENSA-ATOMIC-NEXT: .LBB156_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 3
+; XTENSA-NEXT: movi a6, 0
+; XTENSA-NEXT: l32r a4, .LCPI157_0
+; XTENSA-NEXT: j .LBB157_2
+; XTENSA-NEXT: .LBB157_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB157_4
+; XTENSA-NEXT: .LBB157_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a3, a2, .LBB157_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB157_1
+; XTENSA-NEXT: .LBB157_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB157_2
+; XTENSA-ATOMIC-NEXT: .LBB157_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB157_6
+; XTENSA-ATOMIC-NEXT: .LBB157_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB157_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB157_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB157_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB157_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB157_1
+; XTENSA-ATOMIC-NEXT: .LBB157_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i32_acq_rel:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a5, a2, a2
+; XTENSA-NEXT: l32i a2, a5, 0
+; XTENSA-NEXT: movi a7, 4
+; XTENSA-NEXT: movi a6, 2
+; XTENSA-NEXT: l32r a4, .LCPI158_0
+; XTENSA-NEXT: j .LBB158_2
+; XTENSA-NEXT: .LBB158_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a5, a5
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a6, a6
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB158_4
+; XTENSA-NEXT: .LBB158_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a3, a2, .LBB158_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB158_1
+; XTENSA-NEXT: .LBB158_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_acq_rel:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB158_2
+; XTENSA-ATOMIC-NEXT: .LBB158_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB158_6
+; XTENSA-ATOMIC-NEXT: .LBB158_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB158_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB158_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB158_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB158_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB158_1
+; XTENSA-ATOMIC-NEXT: .LBB158_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; XTENSA-LABEL: atomicrmw_umin_i32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI159_0
+; XTENSA-NEXT: j .LBB159_2
+; XTENSA-NEXT: .LBB159_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB159_4
+; XTENSA-NEXT: .LBB159_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a3, a3
+; XTENSA-NEXT: bltu a3, a2, .LBB159_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB159_1
+; XTENSA-NEXT: .LBB159_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: atomicrmw_umin_i32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB159_2
+; XTENSA-ATOMIC-NEXT: .LBB159_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB159_6
+; XTENSA-ATOMIC-NEXT: .LBB159_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a3, a3
+; XTENSA-ATOMIC-NEXT: bltu a3, a11, .LBB159_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB159_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB159_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB159_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB159_1
+; XTENSA-ATOMIC-NEXT: .LBB159_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = atomicrmw umin ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
diff --git a/llvm/test/CodeGen/Xtensa/forced-atomics.ll b/llvm/test/CodeGen/Xtensa/forced-atomics.ll
new file mode 100644
index 0000000..eeec87b
--- /dev/null
+++ b/llvm/test/CodeGen/Xtensa/forced-atomics.ll
@@ -0,0 +1,1426 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=xtensa -mattr=+windowed < %s | FileCheck %s --check-prefixes=XTENSA
+; RUN: llc -mtriple=xtensa -mattr=+windowed,+s32c1i -mattr=+forced-atomics < %s | FileCheck %s --check-prefixes=XTENSA-ATOMIC
+
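+; The first RUN line has no atomic instructions available, so every atomic
+; operation lowers to an __atomic_* libcall (addresses live in the constant
+; pool and are not shown in the checks). The second run enables s32c1i and
+; forced-atomics, so loads and stores become plain accesses fenced by memw
+; and RMW/cmpxchg operations expand to inline compare-and-swap loops.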
+define i8 @load8(ptr %p) nounwind {
+; XTENSA-LABEL: load8:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI0_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load8:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l8ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i8, ptr %p seq_cst, align 1
+ ret i8 %v
+}
+
+define void @store8(ptr %p) nounwind {
+; XTENSA-LABEL: store8:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI1_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store8:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s8i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i8 0, ptr %p seq_cst, align 1
+ ret void
+}
+
+define i8 @rmw8(ptr %p) nounwind {
+; XTENSA-LABEL: rmw8:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI2_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw8:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: movi a11, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a11, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a11, a12
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: j .LBB2_2
+; XTENSA-ATOMIC-NEXT: .LBB2_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB2_4
+; XTENSA-ATOMIC-NEXT: .LBB2_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a12
+; XTENSA-ATOMIC-NEXT: add a6, a15, a10
+; XTENSA-ATOMIC-NEXT: and a6, a6, a11
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: or a6, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB2_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB2_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB2_1
+; XTENSA-ATOMIC-NEXT: .LBB2_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i8 1 seq_cst, align 1
+ ret i8 %v
+}
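+; The sub-word RMW expansion above operates on the containing aligned word:
+; and with -4 rounds the address down, the byte's bit offset comes from the
+; low address bits (slli by 3, masked to 24), ssl/sll shift the operand and
+; the 255 mask into position, the s32c1i loop updates only the selected
+; byte, and ssr/srl shift the result back down.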
+
+define i8 @cmpxchg8(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg8:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a8, 0
+; XTENSA-NEXT: s8i a8, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: movi a13, 5
+; XTENSA-NEXT: l32r a8, .LCPI3_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l8ui a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg8:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 255
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0
+; XTENSA-ATOMIC-NEXT: and a7, a11, a9
+; XTENSA-ATOMIC-NEXT: movi a11, 1
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a12, a11
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: .LBB3_1: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: or a14, a15, a12
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a11, a11
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB3_3
+; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: .LBB3_3: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1
+; XTENSA-ATOMIC-NEXT: bnez a7, .LBB3_5
+; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB3_1 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB3_1
+; XTENSA-ATOMIC-NEXT: .LBB3_5: # %partword.cmpxchg.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i8 0, i8 1 seq_cst seq_cst
+ %res.0 = extractvalue { i8, i1 } %res, 0
+ ret i8 %res.0
+}
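+; The partword cmpxchg loop above distinguishes real from spurious failures:
+; when s32c1i observes a mismatch, the failure block clears the target byte
+; of the observed word and retries only if the surrounding bytes changed; if
+; the masked words still match, the byte itself differed and the operation
+; has genuinely failed.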
+
+define i16 @load16(ptr %p) nounwind {
+; XTENSA-LABEL: load16:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI4_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load16:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l16ui a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i16, ptr %p seq_cst, align 2
+ ret i16 %v
+}
+
+define void @store16(ptr %p) nounwind {
+; XTENSA-LABEL: store16:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI5_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store16:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s16i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i16 0, ptr %p seq_cst, align 2
+ ret void
+}
+
+define i16 @rmw16(ptr %p) nounwind {
+; XTENSA-LABEL: rmw16:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI6_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw16:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a10, a9
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI6_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a11, a11
+; XTENSA-ATOMIC-NEXT: movi a12, -1
+; XTENSA-ATOMIC-NEXT: xor a12, a11, a12
+; XTENSA-ATOMIC-NEXT: movi a13, -4
+; XTENSA-ATOMIC-NEXT: and a13, a2, a13
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a15, a13, 0
+; XTENSA-ATOMIC-NEXT: movi a14, 0
+; XTENSA-ATOMIC-NEXT: j .LBB6_2
+; XTENSA-ATOMIC-NEXT: .LBB6_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: beqi a6, 1, .LBB6_4
+; XTENSA-ATOMIC-NEXT: .LBB6_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a15, a12
+; XTENSA-ATOMIC-NEXT: add a6, a15, a10
+; XTENSA-ATOMIC-NEXT: and a6, a6, a11
+; XTENSA-ATOMIC-NEXT: or a7, a7, a6
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a7, a13, 0
+; XTENSA-ATOMIC-NEXT: or a6, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a7, a15, .LBB6_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB6_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a6, a14, a14
+; XTENSA-ATOMIC-NEXT: j .LBB6_1
+; XTENSA-ATOMIC-NEXT: .LBB6_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a7
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i16 1 seq_cst, align 2
+ ret i16 %v
+}
+
+define i16 @cmpxchg16(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg16:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a8, 0
+; XTENSA-NEXT: s16i a8, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: movi a13, 5
+; XTENSA-NEXT: l32r a8, .LCPI7_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l16ui a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg16:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: slli a8, a2, 3
+; XTENSA-ATOMIC-NEXT: movi a9, 24
+; XTENSA-ATOMIC-NEXT: and a8, a8, a9
+; XTENSA-ATOMIC-NEXT: l32r a9, .LCPI7_0
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a9, a9
+; XTENSA-ATOMIC-NEXT: movi a10, -1
+; XTENSA-ATOMIC-NEXT: xor a9, a9, a10
+; XTENSA-ATOMIC-NEXT: movi a10, -4
+; XTENSA-ATOMIC-NEXT: and a10, a2, a10
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a10, 0
+; XTENSA-ATOMIC-NEXT: and a7, a11, a9
+; XTENSA-ATOMIC-NEXT: movi a11, 1
+; XTENSA-ATOMIC-NEXT: ssl a8
+; XTENSA-ATOMIC-NEXT: sll a12, a11
+; XTENSA-ATOMIC-NEXT: movi a13, 0
+; XTENSA-ATOMIC-NEXT: .LBB7_1: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a15, a7, a7
+; XTENSA-ATOMIC-NEXT: or a14, a15, a12
+; XTENSA-ATOMIC-NEXT: wsr a15, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a14, a10, 0
+; XTENSA-ATOMIC-NEXT: or a7, a11, a11
+; XTENSA-ATOMIC-NEXT: beq a14, a15, .LBB7_3
+; XTENSA-ATOMIC-NEXT: # %bb.2: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a13, a13
+; XTENSA-ATOMIC-NEXT: .LBB7_3: # %partword.cmpxchg.loop
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1
+; XTENSA-ATOMIC-NEXT: bnez a7, .LBB7_5
+; XTENSA-ATOMIC-NEXT: # %bb.4: # %partword.cmpxchg.failure
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB7_1 Depth=1
+; XTENSA-ATOMIC-NEXT: and a7, a14, a9
+; XTENSA-ATOMIC-NEXT: bne a15, a7, .LBB7_1
+; XTENSA-ATOMIC-NEXT: .LBB7_5: # %partword.cmpxchg.end
+; XTENSA-ATOMIC-NEXT: ssr a8
+; XTENSA-ATOMIC-NEXT: srl a2, a14
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i16 0, i16 1 seq_cst seq_cst
+ %res.0 = extractvalue { i16, i1 } %res, 0
+ ret i16 %res.0
+}
+
+define i32 @load32_unordered(ptr %p) nounwind {
+; XTENSA-LABEL: load32_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI8_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p unordered, align 4
+ ret i32 %v
+}
+
+define i32 @load32_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: load32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI9_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p monotonic, align 4
+ ret i32 %v
+}
+
+define i32 @load32_acquire(ptr %p) nounwind {
+; XTENSA-LABEL: load32_acquire:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 2
+; XTENSA-NEXT: l32r a8, .LCPI10_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_acquire:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p acquire, align 4
+ ret i32 %v
+}
+
+define i32 @load32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: load32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 5
+; XTENSA-NEXT: l32r a8, .LCPI11_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: load32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a2, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ %v = load atomic i32, ptr %p seq_cst, align 4
+ ret i32 %v
+}
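+; For naturally aligned 32-bit atomic loads no CAS is needed: unordered and
+; monotonic compile to a bare l32i, while acquire and seq_cst only append a
+; trailing memw.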
+
+define void @store32_unordered(ptr %p) nounwind {
+; XTENSA-LABEL: store32_unordered:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI12_0
+; XTENSA-NEXT: or a12, a11, a11
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_unordered:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p unordered, align 4
+ ret void
+}
+
+define void @store32_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: store32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: l32r a8, .LCPI13_0
+; XTENSA-NEXT: or a12, a11, a11
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p monotonic, align 4
+ ret void
+}
+
+define void @store32_release(ptr %p) nounwind {
+; XTENSA-LABEL: store32_release:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 3
+; XTENSA-NEXT: l32r a8, .LCPI14_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_release:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p release, align 4
+ ret void
+}
+
+define void @store32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: store32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 0
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI15_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: store32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 0
+; XTENSA-ATOMIC-NEXT: s32i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: retw
+ store atomic i32 0, ptr %p seq_cst, align 4
+ ret void
+}
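+; Atomic 32-bit stores mirror the loads: release places a memw before the
+; s32i, seq_cst brackets it with memw on both sides, and unordered and
+; monotonic emit no barrier at all.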
+
+define i32 @rmw32_add_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_add_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 0
+; XTENSA-NEXT: l32r a8, .LCPI16_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_add_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB16_2
+; XTENSA-ATOMIC-NEXT: .LBB16_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB16_4
+; XTENSA-ATOMIC-NEXT: .LBB16_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, 1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB16_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB16_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB16_1
+; XTENSA-ATOMIC-NEXT: .LBB16_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i32 1 monotonic, align 4
+ ret i32 %v
+}
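+; The word-sized RMW loop above is the core s32c1i idiom: wsr ... scompare1
+; sets the expected value, s32c1i stores the new value only if memory still
+; matches scompare1 and yields the value it observed, and the loop retries
+; until the observed value equals the expected one.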
+
+define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_add_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI17_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_add_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB17_2
+; XTENSA-ATOMIC-NEXT: .LBB17_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB17_4
+; XTENSA-ATOMIC-NEXT: .LBB17_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, 1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB17_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB17_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB17_1
+; XTENSA-ATOMIC-NEXT: .LBB17_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw add ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_sub_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI18_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_sub_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: movi a10, 1
+; XTENSA-ATOMIC-NEXT: j .LBB18_2
+; XTENSA-ATOMIC-NEXT: .LBB18_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB18_4
+; XTENSA-ATOMIC-NEXT: .LBB18_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: addi a8, a11, -1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB18_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB18_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: j .LBB18_1
+; XTENSA-ATOMIC-NEXT: .LBB18_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw sub ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_and_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI19_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_and_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB19_2
+; XTENSA-ATOMIC-NEXT: .LBB19_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB19_4
+; XTENSA-ATOMIC-NEXT: .LBB19_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: and a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB19_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB19_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB19_1
+; XTENSA-ATOMIC-NEXT: .LBB19_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw and ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_nand_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI20_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_nand_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a13, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, -1
+; XTENSA-ATOMIC-NEXT: movi a10, -2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: movi a12, 1
+; XTENSA-ATOMIC-NEXT: j .LBB20_2
+; XTENSA-ATOMIC-NEXT: .LBB20_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a14, 1, .LBB20_4
+; XTENSA-ATOMIC-NEXT: .LBB20_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a13, a9
+; XTENSA-ATOMIC-NEXT: or a8, a8, a10
+; XTENSA-ATOMIC-NEXT: wsr a13, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a14, a12, a12
+; XTENSA-ATOMIC-NEXT: beq a8, a13, .LBB20_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB20_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a14, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB20_1
+; XTENSA-ATOMIC-NEXT: .LBB20_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw nand ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
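+; nand with the constant 1 folds by De Morgan: ~(x & 1) == ~x | ~1, which is
+; exactly the xor with -1 followed by the or with -2 in the loop above.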
+
+define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_or_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI21_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_or_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB21_2
+; XTENSA-ATOMIC-NEXT: .LBB21_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB21_4
+; XTENSA-ATOMIC-NEXT: .LBB21_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB21_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB21_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB21_1
+; XTENSA-ATOMIC-NEXT: .LBB21_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw or ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_xor_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI22_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_xor_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB22_2
+; XTENSA-ATOMIC-NEXT: .LBB22_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB22_4
+; XTENSA-ATOMIC-NEXT: .LBB22_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: xor a8, a11, a9
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB22_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB22_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB22_1
+; XTENSA-ATOMIC-NEXT: .LBB22_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw xor ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_max_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a5, 1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI23_0
+; XTENSA-NEXT: j .LBB23_2
+; XTENSA-NEXT: .LBB23_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB23_4
+; XTENSA-NEXT: .LBB23_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: bge a5, a2, .LBB23_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB23_1
+; XTENSA-NEXT: .LBB23_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_max_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB23_2
+; XTENSA-ATOMIC-NEXT: .LBB23_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB23_6
+; XTENSA-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: bge a9, a11, .LBB23_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB23_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB23_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB23_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB23_1
+; XTENSA-ATOMIC-NEXT: .LBB23_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw max ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_min_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a12, a2, 0
+; XTENSA-NEXT: movi a6, 1
+; XTENSA-NEXT: movi a5, 2
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI24_0
+; XTENSA-NEXT: j .LBB24_2
+; XTENSA-NEXT: .LBB24_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a12, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB24_4
+; XTENSA-NEXT: .LBB24_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a12, a1, 0
+; XTENSA-NEXT: blt a12, a5, .LBB24_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-NEXT: or a12, a6, a6
+; XTENSA-NEXT: j .LBB24_1
+; XTENSA-NEXT: .LBB24_4: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a12, a12
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_min_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: or a8, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB24_2
+; XTENSA-ATOMIC-NEXT: .LBB24_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB24_6
+; XTENSA-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: blt a12, a10, .LBB24_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: .LBB24_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a12, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a13, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB24_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB24_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB24_1
+; XTENSA-ATOMIC-NEXT: .LBB24_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw min ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_umax_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a6, a2, a2
+; XTENSA-NEXT: l32i a2, a6, 0
+; XTENSA-NEXT: movi a5, 1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI25_0
+; XTENSA-NEXT: j .LBB25_2
+; XTENSA-NEXT: .LBB25_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a6, a6
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB25_4
+; XTENSA-NEXT: .LBB25_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a2, a1, 0
+; XTENSA-NEXT: or a12, a5, a5
+; XTENSA-NEXT: bgeu a5, a2, .LBB25_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-NEXT: or a12, a2, a2
+; XTENSA-NEXT: j .LBB25_1
+; XTENSA-NEXT: .LBB25_4: # %atomicrmw.end
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_umax_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB25_2
+; XTENSA-ATOMIC-NEXT: .LBB25_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB25_6
+; XTENSA-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: bgeu a9, a11, .LBB25_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a11, a11
+; XTENSA-ATOMIC-NEXT: .LBB25_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB25_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB25_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB25_1
+; XTENSA-ATOMIC-NEXT: .LBB25_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw umax ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_umin_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a12, a2, 0
+; XTENSA-NEXT: movi a6, 1
+; XTENSA-NEXT: movi a5, 2
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a4, .LCPI26_0
+; XTENSA-NEXT: j .LBB26_2
+; XTENSA-NEXT: .LBB26_1: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a4
+; XTENSA-NEXT: l32i a12, a1, 0
+; XTENSA-NEXT: bnez a10, .LBB26_4
+; XTENSA-NEXT: .LBB26_2: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a12, a1, 0
+; XTENSA-NEXT: bltu a12, a5, .LBB26_1
+; XTENSA-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-NEXT: or a12, a6, a6
+; XTENSA-NEXT: j .LBB26_1
+; XTENSA-NEXT: .LBB26_4: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a12, a12
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_umin_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a12, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 2
+; XTENSA-ATOMIC-NEXT: movi a11, 0
+; XTENSA-ATOMIC-NEXT: or a8, a12, a12
+; XTENSA-ATOMIC-NEXT: j .LBB26_2
+; XTENSA-ATOMIC-NEXT: .LBB26_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a13, 1, .LBB26_6
+; XTENSA-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: bltu a12, a10, .LBB26_4
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: .LBB26_4: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a12, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a13, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a12, .LBB26_1
+; XTENSA-ATOMIC-NEXT: # %bb.5: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB26_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a13, a11, a11
+; XTENSA-ATOMIC-NEXT: j .LBB26_1
+; XTENSA-ATOMIC-NEXT: .LBB26_6: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw umin ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
+define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_xchg_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 32
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a11, 1
+; XTENSA-NEXT: movi a12, 5
+; XTENSA-NEXT: l32r a8, .LCPI27_0
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_xchg_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a11, a2, 0
+; XTENSA-ATOMIC-NEXT: movi a9, 1
+; XTENSA-ATOMIC-NEXT: movi a10, 0
+; XTENSA-ATOMIC-NEXT: j .LBB27_2
+; XTENSA-ATOMIC-NEXT: .LBB27_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a11, a8, a8
+; XTENSA-ATOMIC-NEXT: beqi a12, 1, .LBB27_4
+; XTENSA-ATOMIC-NEXT: .LBB27_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: wsr a11, scompare1
+; XTENSA-ATOMIC-NEXT: or a8, a9, a9
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a12, a9, a9
+; XTENSA-ATOMIC-NEXT: beq a8, a11, .LBB27_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB27_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a12, a10, a10
+; XTENSA-ATOMIC-NEXT: j .LBB27_1
+; XTENSA-ATOMIC-NEXT: .LBB27_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw xchg ptr %p, i32 1 seq_cst, align 4
+ ret i32 %v
+}
+
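+; The floating-point flavours cannot be computed inline: both
+; configurations call out for the FP operation itself (presumably the
+; soft-float __addsf3/__subsf3/fminf/fmaxf routines, loaded from the
+; constant pool), so even XTENSA-ATOMIC makes one call per iteration of
+; its compare-and-swap loop.
+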
+define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fadd_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI28_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI28_2
+; XTENSA-NEXT: .LBB28_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI28_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB28_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fadd_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI28_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB28_2
+; XTENSA-ATOMIC-NEXT: .LBB28_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB28_4
+; XTENSA-ATOMIC-NEXT: .LBB28_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI28_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB28_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB28_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB28_1
+; XTENSA-ATOMIC-NEXT: .LBB28_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fsub_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI29_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI29_2
+; XTENSA-NEXT: .LBB29_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI29_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB29_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fsub_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI29_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB29_2
+; XTENSA-ATOMIC-NEXT: .LBB29_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB29_4
+; XTENSA-ATOMIC-NEXT: .LBB29_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI29_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB29_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB29_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB29_1
+; XTENSA-ATOMIC-NEXT: .LBB29_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fmin_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI30_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI30_2
+; XTENSA-NEXT: .LBB30_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI30_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB30_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fmin_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI30_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB30_2
+; XTENSA-ATOMIC-NEXT: .LBB30_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB30_4
+; XTENSA-ATOMIC-NEXT: .LBB30_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI30_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB30_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB30_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB30_1
+; XTENSA-ATOMIC-NEXT: .LBB30_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
+define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: rmw32_fmax_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: l32i a10, a2, 0
+; XTENSA-NEXT: l32r a6, .LCPI31_1
+; XTENSA-NEXT: movi a7, 5
+; XTENSA-NEXT: l32r a5, .LCPI31_2
+; XTENSA-NEXT: .LBB31_1: # %atomicrmw.start
+; XTENSA-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-NEXT: s32i a10, a1, 0
+; XTENSA-NEXT: l32r a11, .LCPI31_0
+; XTENSA-NEXT: callx8 a6
+; XTENSA-NEXT: or a12, a10, a10
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: or a13, a7, a7
+; XTENSA-NEXT: or a14, a7, a7
+; XTENSA-NEXT: callx8 a5
+; XTENSA-NEXT: or a8, a10, a10
+; XTENSA-NEXT: l32i a10, a1, 0
+; XTENSA-NEXT: beqz a8, .LBB31_1
+; XTENSA-NEXT: # %bb.2: # %atomicrmw.end
+; XTENSA-NEXT: or a2, a10, a10
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: rmw32_fmax_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: l32i a7, a2, 0
+; XTENSA-ATOMIC-NEXT: l32r a6, .LCPI31_1
+; XTENSA-ATOMIC-NEXT: movi a5, 0
+; XTENSA-ATOMIC-NEXT: movi a4, 1
+; XTENSA-ATOMIC-NEXT: j .LBB31_2
+; XTENSA-ATOMIC-NEXT: .LBB31_1: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a7, a10, a10
+; XTENSA-ATOMIC-NEXT: beqi a8, 1, .LBB31_4
+; XTENSA-ATOMIC-NEXT: .LBB31_2: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
+; XTENSA-ATOMIC-NEXT: l32r a11, .LCPI31_0
+; XTENSA-ATOMIC-NEXT: or a10, a7, a7
+; XTENSA-ATOMIC-NEXT: callx8 a6
+; XTENSA-ATOMIC-NEXT: wsr a7, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a10, a2, 0
+; XTENSA-ATOMIC-NEXT: or a8, a4, a4
+; XTENSA-ATOMIC-NEXT: beq a10, a7, .LBB31_1
+; XTENSA-ATOMIC-NEXT: # %bb.3: # %atomicrmw.start
+; XTENSA-ATOMIC-NEXT: # in Loop: Header=BB31_2 Depth=1
+; XTENSA-ATOMIC-NEXT: or a8, a5, a5
+; XTENSA-ATOMIC-NEXT: j .LBB31_1
+; XTENSA-ATOMIC-NEXT: .LBB31_4: # %atomicrmw.end
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a10, a10
+; XTENSA-ATOMIC-NEXT: retw
+ %v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4
+ ret float %v
+}
+
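+; cmpxchg maps directly onto wsr/s32c1i with no loop. The monotonic form
+; needs no fences; the seq_cst form below brackets the s32c1i with memw.
+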
+define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg32_monotonic:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a13, 0
+; XTENSA-NEXT: s32i a13, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: l32r a8, .LCPI32_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg32_monotonic:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: movi a8, 1
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: wsr a9, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic
+ %res.0 = extractvalue { i32, i1 } %res, 0
+ ret i32 %res.0
+}
+
+define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
+; XTENSA-LABEL: cmpxchg32_seq_cst:
+; XTENSA: # %bb.0:
+; XTENSA-NEXT: entry a1, 48
+; XTENSA-NEXT: or a10, a2, a2
+; XTENSA-NEXT: movi a8, 0
+; XTENSA-NEXT: s32i a8, a1, 0
+; XTENSA-NEXT: addi a11, a1, 0
+; XTENSA-NEXT: movi a12, 1
+; XTENSA-NEXT: movi a13, 5
+; XTENSA-NEXT: l32r a8, .LCPI33_0
+; XTENSA-NEXT: or a14, a13, a13
+; XTENSA-NEXT: callx8 a8
+; XTENSA-NEXT: l32i a2, a1, 0
+; XTENSA-NEXT: retw
+;
+; XTENSA-ATOMIC-LABEL: cmpxchg32_seq_cst:
+; XTENSA-ATOMIC: # %bb.0:
+; XTENSA-ATOMIC-NEXT: entry a1, 32
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: movi a8, 1
+; XTENSA-ATOMIC-NEXT: movi a9, 0
+; XTENSA-ATOMIC-NEXT: wsr a9, scompare1
+; XTENSA-ATOMIC-NEXT: s32c1i a8, a2, 0
+; XTENSA-ATOMIC-NEXT: memw
+; XTENSA-ATOMIC-NEXT: or a2, a8, a8
+; XTENSA-ATOMIC-NEXT: retw
+ %res = cmpxchg ptr %p, i32 0, i32 1 seq_cst seq_cst
+ %res.0 = extractvalue { i32, i1 } %res, 0
+ ret i32 %res.0
+}
diff --git a/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll b/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll
new file mode 100644
index 0000000..33028f2
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll
@@ -0,0 +1,37 @@
+; RUN: llc -O3 -o %t -filetype=obj %s
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; based on clang++ output for `int *alloc_int() { return new int; }`
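+; The !heapallocsite metadata on the call records the allocated type;
+; the DWARF emitter is expected to surface it as DW_AT_LLVM_alloc_type on
+; the call-site DIE, pointing at the DW_TAG_base_type for "int".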
+
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define dso_local ptr @alloc_int() !dbg !3 {
+; CHECK: DW_TAG_subprogram
+entry:
+ %call = call ptr @alloc(i64 noundef 4), !heapallocsite !7
+; CHECK: DW_TAG_call_site
+; CHECK: DW_AT_LLVM_alloc_type ([[ALLOCSITE:.*]])
+ ret ptr %call
+}
+
+; CHECK: {{.*}}[[ALLOCSITE]]: DW_TAG_base_type
+; CHECK: DW_AT_name ("int")
+
+declare dso_local ptr @alloc(i64 noundef)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2,!8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, emissionKind: FullDebug)
+!1 = !DIFile(filename: "a.cpp", directory: "/")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "alloc_int", scope: !1, file: !1, line: 1, type: !4, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition, unit: !0)
+!4 = !DISubroutineType(types: !5)
+!5 = !{!6}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{i32 2, !"Dwarf Version", i32 5}
diff --git a/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs-indirect.ll b/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs-indirect.ll
new file mode 100644
index 0000000..6c81e2e
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs-indirect.ll
@@ -0,0 +1,78 @@
+; $ clang -O2 -S -emit-llvm indir.c -gdwarf-5
+; __attribute__((disable_tail_calls)) void call_reg(void (*f)()) { f(); }
+; __attribute__((disable_tail_calls)) void call_mem(void (**f)()) { (*f)(); }
+
+; RUN: llc -mtriple=x86_64 -debugger-tune=lldb < %s -filetype=obj -o %t.o
+; RUN: llvm-dwarfdump %t.o -o - | FileCheck %s -check-prefix=OBJ -implicit-check-not=DW_TAG_call_site -implicit-check-not=DW_AT_call_target
+; RUN: llvm-dwarfdump -verify %t.o 2>&1 | FileCheck %s -check-prefix=VERIFY
+; RUN: llvm-dwarfdump -statistics %t.o | FileCheck %s -check-prefix=STATS
+
+; VERIFY: No errors.
+; STATS: "#call site DIEs": 1,
+
+; OBJ: DW_TAG_subprogram
+; OBJ: DW_AT_name ("call_reg")
+; Function Attrs: nounwind uwtable
+define dso_local void @call_reg(ptr noundef readonly captures(none) %f) local_unnamed_addr #0 !dbg !10 {
+entry:
+ #dbg_value(ptr %f, !17, !DIExpression(), !18)
+
+; OBJ: DW_TAG_call_site
+; OBJ: DW_AT_call_target
+; OBJ: DW_AT_call_return_pc
+ call void (...) %f() #1, !dbg !19
+ ret void, !dbg !20
+}
+
+; OBJ: DW_TAG_subprogram
+; OBJ: DW_AT_name ("call_mem")
+; Function Attrs: nounwind uwtable
+define dso_local void @call_mem(ptr noundef readonly captures(none) %f) local_unnamed_addr #0 !dbg !21 {
+entry:
+ #dbg_value(ptr %f, !26, !DIExpression(), !27)
+ %0 = load ptr, ptr %f, align 8, !dbg !28, !tbaa !29
+ call void (...) %0() #1, !dbg !28
+ ret void, !dbg !33
+}
+
+attributes #0 = { nounwind uwtable "disable-tail-calls"="true" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cmov,+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #1 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3, !4, !5, !6, !7, !8}
+!llvm.ident = !{!9}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, producer: "clang version 22.0.0git (https://github.com/llvm/llvm-project 74e4a8645da91247dc8dc502771c2cc4d46f1f91)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "indir.c", directory: "/tmp", checksumkind: CSK_MD5, checksum: "4a7538b13e2edbec44f43ed5154be38c")
+!2 = !{i32 7, !"Dwarf Version", i32 5}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = !{i32 8, !"PIC Level", i32 2}
+!6 = !{i32 7, !"PIE Level", i32 2}
+!7 = !{i32 7, !"uwtable", i32 2}
+!8 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!9 = !{!"clang version 22.0.0git (https://github.com/llvm/llvm-project 74e4a8645da91247dc8dc502771c2cc4d46f1f91)"}
+!10 = distinct !DISubprogram(name: "call_reg", scope: !1, file: !1, line: 1, type: !11, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !16)
+!11 = !DISubroutineType(types: !12)
+!12 = !{null, !13}
+!13 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !14, size: 64)
+!14 = !DISubroutineType(types: !15)
+!15 = !{null, null}
+!16 = !{!17}
+!17 = !DILocalVariable(name: "f", arg: 1, scope: !10, file: !1, line: 1, type: !13)
+!18 = !DILocation(line: 0, scope: !10)
+!19 = !DILocation(line: 1, column: 66, scope: !10)
+!20 = !DILocation(line: 1, column: 71, scope: !10)
+!21 = distinct !DISubprogram(name: "call_mem", scope: !1, file: !1, line: 2, type: !22, scopeLine: 2, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !25)
+!22 = !DISubroutineType(types: !23)
+!23 = !{null, !24}
+!24 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !13, size: 64)
+!25 = !{!26}
+!26 = !DILocalVariable(name: "f", arg: 1, scope: !21, file: !1, line: 2, type: !24)
+!27 = !DILocation(line: 0, scope: !21)
+!28 = !DILocation(line: 2, column: 67, scope: !21)
+!29 = !{!30, !30, i64 0}
+!30 = !{!"any pointer", !31, i64 0}
+!31 = !{!"omnipotent char", !32, i64 0}
+!32 = !{!"Simple C/C++ TBAA"}
+!33 = !DILocation(line: 2, column: 75, scope: !21)
diff --git a/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs.ll b/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs.ll
index c927ff2..8ed247d4 100644
--- a/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs.ll
+++ b/llvm/test/DebugInfo/X86/dwarf-callsite-related-attrs.ll
@@ -20,7 +20,7 @@
; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis -o /dev/null
; VERIFY: No errors.
-; STATS: "#call site DIEs": 6,
+; STATS: "#call site DIEs": 5,
@sink = global i32 0, align 4, !dbg !0
@@ -94,16 +94,10 @@ entry:
; OBJ: DW_TAG_call_site
; OBJ: DW_AT_call_origin ([[foo_sp]] "_Z3foov")
; OBJ: DW_AT_call_return_pc
-; OBJ: DW_TAG_call_site
-; OBJ: DW_AT_call_target
-; OBJ: DW_AT_call_return_pc
define i32 @main() !dbg !29 {
entry:
call void @_Z3foov(), !dbg !32
- %indirect_target = load ptr, ptr undef
- call void %indirect_target()
-
call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"()
ret i32 0, !dbg !33
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/capture-no-omit.ll b/llvm/test/Instrumentation/ThreadSanitizer/capture-no-omit.ll
new file mode 100644
index 0000000..cae0493
--- /dev/null
+++ b/llvm/test/Instrumentation/ThreadSanitizer/capture-no-omit.ll
@@ -0,0 +1,98 @@
+; RUN: opt < %s -passes=tsan -tsan-omit-by-pointer-capturing=0 -S | FileCheck %s
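+;
+; With -tsan-omit-by-pointer-capturing=0, accesses to stack allocations
+; are instrumented even where the default capture-based analysis would
+; omit them; the CHECK lines below therefore expect every access to be
+; instrumented (compare capture.ll, which checks the omissions with
+; CHECK-NOT under the default setting).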
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+declare void @escape(ptr)
+
+@sink = global ptr null, align 4
+
+
+define void @captured2() nounwind uwtable sanitize_thread {
+entry:
+ %ptr = alloca i32, align 4
+ %tmp = alloca ptr, align 8
+ ; transitive escape
+ store ptr %ptr, ptr %tmp, align 8
+ %0 = load ptr, ptr %tmp, align 8
+ store ptr %0, ptr @sink, align 8
+ store i32 42, ptr %ptr, align 4
+ ret void
+}
+; CHECK-LABEL: define void @captured2
+; CHECK: __tsan_write
+; CHECK: __tsan_read
+; CHECK: __tsan_write
+; CHECK: __tsan_write
+; CHECK: ret void
+
+define void @captured3() nounwind uwtable sanitize_thread {
+entry:
+ %stkobj = alloca [2 x i32], align 8
+ ; escapes due to store into global
+ store ptr %stkobj, ptr @sink, align 8
+ ; derived is captured as its base object is captured
+ %derived = getelementptr inbounds i32, ptr %stkobj, i64 1
+ store i32 42, ptr %derived, align 4
+ ret void
+}
+; CHECK-LABEL: define void @captured3
+; CHECK: __tsan_write
+; CHECK: __tsan_write
+; CHECK: ret void
+
+define void @notcaptured2() nounwind uwtable sanitize_thread {
+entry:
+ %ptr = alloca i32, align 4
+ %tmp = alloca ptr, align 8
+ store i32 42, ptr %ptr, align 4
+ ; transitive escape
+ store ptr %ptr, ptr %tmp, align 8
+ %0 = load ptr, ptr %tmp, align 8
+ store ptr %0, ptr @sink, align 8
+ ret void
+}
+; CHECK-LABEL: define void @notcaptured2
+; CHECK: __tsan_write
+; CHECK: __tsan_write
+; CHECK: __tsan_read
+; CHECK: __tsan_write
+; CHECK: ret void
+
+define void @notcaptured3(i1 %cond) nounwind uwtable sanitize_thread {
+entry:
+ %stkobj = alloca [2 x i32], align 8
+ %derived = getelementptr inbounds i32, ptr %stkobj, i64 1
+ %ptr = select i1 %cond, ptr %derived, ptr %stkobj
+ store i32 42, ptr %ptr, align 4
+ ret void
+}
+; CHECK-LABEL: define void @notcaptured3
+; CHECK: __tsan_write
+; CHECK: ret void
+
+define void @notcaptured4() nounwind uwtable sanitize_thread {
+entry:
+ %stkobj = alloca [10 x i8], align 1
+ br label %loop
+
+exit:
+ ret void
+
+loop:
+ %count = phi i32 [ 0, %entry ], [ %addone, %loop ]
+ %derived = phi ptr [ %stkobj, %entry ], [ %ptraddone, %loop ]
+ store i32 %count, ptr %derived, align 4
+ %ptraddone = getelementptr inbounds i32, ptr %derived, i64 1
+ %addone = add nuw nsw i32 %count, 1
+ %eq10 = icmp eq i32 %addone, 10
+ br i1 %eq10, label %exit, label %loop
+}
+; CHECK-LABEL: define void @notcaptured4
+; CHECK: ret void
+; CHECK: __tsan_write
diff --git a/llvm/test/Instrumentation/ThreadSanitizer/capture.ll b/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
index e1b9e03..5083c79 100644
--- a/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
+++ b/llvm/test/Instrumentation/ThreadSanitizer/capture.ll
@@ -45,6 +45,7 @@ entry:
; CHECK-LABEL: define void @captured2
; CHECK: __tsan_write
; CHECK: __tsan_write
+; CHECK-NOT: __tsan_write
; CHECK: ret void
define void @captured3() nounwind uwtable sanitize_thread {
@@ -101,6 +102,7 @@ entry:
; CHECK-LABEL: define void @notcaptured2
; CHECK: __tsan_write
; CHECK: __tsan_write
+; CHECK-NOT: __tsan_write
; CHECK: ret void
define void @notcaptured3(i1 %cond) nounwind uwtable sanitize_thread {
diff --git a/llvm/test/MC/AArch64/armv9.6a-lsui.s b/llvm/test/MC/AArch64/armv9.6a-lsui.s
index d4a5e1f9..dcd2693 100644
--- a/llvm/test/MC/AArch64/armv9.6a-lsui.s
+++ b/llvm/test/MC/AArch64/armv9.6a-lsui.s
@@ -212,10 +212,10 @@ _func:
//------------------------------------------------------------------------------
ldtadd w7, wzr, [x5]
-// CHECK: ldtadd w7, wzr, [x5] // encoding: [0xbf,0x04,0x27,0x19]
+// CHECK: sttadd w7, [x5] // encoding: [0xbf,0x04,0x27,0x19]
// ERROR: instruction requires: lsui
ldtadd x9, xzr, [sp]
-// CHECK: ldtadd x9, xzr, [sp] // encoding: [0xff,0x07,0x29,0x59]
+// CHECK: sttadd x9, [sp] // encoding: [0xff,0x07,0x29,0x59]
// ERROR: instruction requires: lsui
ldtadda w7, wzr, [x5]
@@ -226,10 +226,10 @@ _func:
// ERROR: instruction requires: lsui
ldtaddl w7, wzr, [x5]
-// CHECK: ldtaddl w7, wzr, [x5] // encoding: [0xbf,0x04,0x67,0x19]
+// CHECK: sttaddl w7, [x5] // encoding: [0xbf,0x04,0x67,0x19]
// ERROR: instruction requires: lsui
ldtaddl x9, xzr, [sp]
-// CHECK: ldtaddl x9, xzr, [sp] // encoding: [0xff,0x07,0x69,0x59]
+// CHECK: sttaddl x9, [sp] // encoding: [0xff,0x07,0x69,0x59]
// ERROR: instruction requires: lsui
ldtaddal w7, wzr, [x5]
@@ -240,17 +240,17 @@ _func:
// ERROR: instruction requires: lsui
ldtclr w7, wzr, [x5]
-// CHECK: ldtclr w7, wzr, [x5] // encoding: [0xbf,0x14,0x27,0x19]
+// CHECK: sttclr w7, [x5] // encoding: [0xbf,0x14,0x27,0x19]
// ERROR: instruction requires: lsui
ldtclr x9, xzr, [sp]
-// CHECK: ldtclr x9, xzr, [sp] // encoding: [0xff,0x17,0x29,0x59]
+// CHECK: sttclr x9, [sp] // encoding: [0xff,0x17,0x29,0x59]
// ERROR: instruction requires: lsui
ldtclrl w7, wzr, [x5]
-// CHECK: ldtclrl w7, wzr, [x5] // encoding: [0xbf,0x14,0x67,0x19]
+// CHECK: sttclrl w7, [x5] // encoding: [0xbf,0x14,0x67,0x19]
// ERROR: instruction requires: lsui
ldtclrl x9, xzr, [sp]
-// CHECK: ldtclrl x9, xzr, [sp] // encoding: [0xff,0x17,0x69,0x59]
+// CHECK: sttclrl x9, [sp] // encoding: [0xff,0x17,0x69,0x59]
// ERROR: instruction requires: lsui
ldtclra w7, wzr, [x5]
@@ -268,17 +268,17 @@ _func:
// ERROR: instruction requires: lsui
ldtset w7, wzr, [x5]
-// CHECK: ldtset w7, wzr, [x5] // encoding: [0xbf,0x34,0x27,0x19]
+// CHECK: sttset w7, [x5] // encoding: [0xbf,0x34,0x27,0x19]
// ERROR: instruction requires: lsui
ldtset x9, xzr, [sp]
-// CHECK: ldtset x9, xzr, [sp] // encoding: [0xff,0x37,0x29,0x59]
+// CHECK: sttset x9, [sp] // encoding: [0xff,0x37,0x29,0x59]
// ERROR: instruction requires: lsui
ldtsetl w7, wzr, [x5]
-// CHECK: ldtsetl w7, wzr, [x5] // encoding: [0xbf,0x34,0x67,0x19]
+// CHECK: sttsetl w7, [x5] // encoding: [0xbf,0x34,0x67,0x19]
// ERROR: instruction requires: lsui
ldtsetl x9, xzr, [sp]
-// CHECK: ldtsetl x9, xzr, [sp] // encoding: [0xff,0x37,0x69,0x59]
+// CHECK: sttsetl x9, [sp] // encoding: [0xff,0x37,0x69,0x59]
// ERROR: instruction requires: lsui
ldtseta w7, wzr, [x5]
@@ -300,81 +300,81 @@ _func:
//------------------------------------------------------------------------------
sttadd w0, [x2]
-// CHECK: ldtadd w0, wzr, [x2] // encoding: [0x5f,0x04,0x20,0x19]
+// CHECK: sttadd w0, [x2] // encoding: [0x5f,0x04,0x20,0x19]
// ERROR: instruction requires: lsui
sttadd w2, [sp]
-// CHECK: ldtadd w2, wzr, [sp] // encoding: [0xff,0x07,0x22,0x19]
+// CHECK: sttadd w2, [sp] // encoding: [0xff,0x07,0x22,0x19]
// ERROR: instruction requires: lsui
sttadd x0, [x2]
-// CHECK: ldtadd x0, xzr, [x2] // encoding: [0x5f,0x04,0x20,0x59]
+// CHECK: sttadd x0, [x2] // encoding: [0x5f,0x04,0x20,0x59]
// ERROR: instruction requires: lsui
sttadd x2, [sp]
-// CHECK: ldtadd x2, xzr, [sp] // encoding: [0xff,0x07,0x22,0x59]
+// CHECK: sttadd x2, [sp] // encoding: [0xff,0x07,0x22,0x59]
// ERROR: instruction requires: lsui
sttaddl w0, [x2]
-// CHECK: ldtaddl w0, wzr, [x2] // encoding: [0x5f,0x04,0x60,0x19]
+// CHECK: sttaddl w0, [x2] // encoding: [0x5f,0x04,0x60,0x19]
// ERROR: instruction requires: lsui
sttaddl w2, [sp]
-// CHECK: ldtaddl w2, wzr, [sp] // encoding: [0xff,0x07,0x62,0x19]
+// CHECK: sttaddl w2, [sp] // encoding: [0xff,0x07,0x62,0x19]
// ERROR: instruction requires: lsui
sttaddl x0, [x2]
-// CHECK: ldtaddl x0, xzr, [x2] // encoding: [0x5f,0x04,0x60,0x59]
+// CHECK: sttaddl x0, [x2] // encoding: [0x5f,0x04,0x60,0x59]
// ERROR: instruction requires: lsui
sttaddl x2, [sp]
-// CHECK: ldtaddl x2, xzr, [sp] // encoding: [0xff,0x07,0x62,0x59]
+// CHECK: sttaddl x2, [sp] // encoding: [0xff,0x07,0x62,0x59]
// ERROR: instruction requires: lsui
sttclr w0, [x2]
-// CHECK: ldtclr w0, wzr, [x2] // encoding: [0x5f,0x14,0x20,0x19]
+// CHECK: sttclr w0, [x2] // encoding: [0x5f,0x14,0x20,0x19]
// ERROR: instruction requires: lsui
sttclr w2, [sp]
-// CHECK: ldtclr w2, wzr, [sp] // encoding: [0xff,0x17,0x22,0x19]
+// CHECK: sttclr w2, [sp] // encoding: [0xff,0x17,0x22,0x19]
// ERROR: instruction requires: lsui
sttclr x0, [x2]
-// CHECK: ldtclr x0, xzr, [x2] // encoding: [0x5f,0x14,0x20,0x59]
+// CHECK: sttclr x0, [x2] // encoding: [0x5f,0x14,0x20,0x59]
// ERROR: instruction requires: lsui
sttclr x2, [sp]
-// CHECK: ldtclr x2, xzr, [sp] // encoding: [0xff,0x17,0x22,0x59]
+// CHECK: sttclr x2, [sp] // encoding: [0xff,0x17,0x22,0x59]
// ERROR: instruction requires: lsui
sttclrl w0, [x2]
-// CHECK: ldtclrl w0, wzr, [x2] // encoding: [0x5f,0x14,0x60,0x19]
+// CHECK: sttclrl w0, [x2] // encoding: [0x5f,0x14,0x60,0x19]
// ERROR: instruction requires: lsui
sttclrl w2, [sp]
-// CHECK: ldtclrl w2, wzr, [sp] // encoding: [0xff,0x17,0x62,0x19]
+// CHECK: sttclrl w2, [sp] // encoding: [0xff,0x17,0x62,0x19]
// ERROR: instruction requires: lsui
sttclrl x0, [x2]
-// CHECK: ldtclrl x0, xzr, [x2] // encoding: [0x5f,0x14,0x60,0x59]
+// CHECK: sttclrl x0, [x2] // encoding: [0x5f,0x14,0x60,0x59]
// ERROR: instruction requires: lsui
sttclrl x2, [sp]
-// CHECK: ldtclrl x2, xzr, [sp] // encoding: [0xff,0x17,0x62,0x59]
+// CHECK: sttclrl x2, [sp] // encoding: [0xff,0x17,0x62,0x59]
// ERROR: instruction requires: lsui
sttset w0, [x2]
-// CHECK: ldtset w0, wzr, [x2] // encoding: [0x5f,0x34,0x20,0x19]
+// CHECK: sttset w0, [x2] // encoding: [0x5f,0x34,0x20,0x19]
// ERROR: instruction requires: lsui
sttset w2, [sp]
-// CHECK: ldtset w2, wzr, [sp] // encoding: [0xff,0x37,0x22,0x19]
+// CHECK: sttset w2, [sp] // encoding: [0xff,0x37,0x22,0x19]
// ERROR: instruction requires: lsui
sttset x0, [x2]
-// CHECK: ldtset x0, xzr, [x2] // encoding: [0x5f,0x34,0x20,0x59]
+// CHECK: sttset x0, [x2] // encoding: [0x5f,0x34,0x20,0x59]
// ERROR: instruction requires: lsui
sttset x2, [sp]
-// CHECK: ldtset x2, xzr, [sp] // encoding: [0xff,0x37,0x22,0x59]
+// CHECK: sttset x2, [sp] // encoding: [0xff,0x37,0x22,0x59]
// ERROR: instruction requires: lsui
sttsetl w0, [x2]
-// CHECK: ldtsetl w0, wzr, [x2] // encoding: [0x5f,0x34,0x60,0x19]
+// CHECK: sttsetl w0, [x2] // encoding: [0x5f,0x34,0x60,0x19]
// ERROR: instruction requires: lsui
sttsetl w2, [sp]
-// CHECK: ldtsetl w2, wzr, [sp] // encoding: [0xff,0x37,0x62,0x19]
+// CHECK: sttsetl w2, [sp] // encoding: [0xff,0x37,0x62,0x19]
// ERROR: instruction requires: lsui
sttsetl x0, [x2]
-// CHECK: ldtsetl x0, xzr, [x2] // encoding: [0x5f,0x34,0x60,0x59]
+// CHECK: sttsetl x0, [x2] // encoding: [0x5f,0x34,0x60,0x59]
// ERROR: instruction requires: lsui
sttsetl x2, [sp]
-// CHECK: ldtsetl x2, xzr, [sp] // encoding: [0xff,0x37,0x62,0x59]
+// CHECK: sttsetl x2, [sp] // encoding: [0xff,0x37,0x62,0x59]
// ERROR: instruction requires: lsui
//------------------------------------------------------------------------------
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
index f1641fc..b46189b 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
@@ -1,6 +1,1922 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefixes=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR %s
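+
+// For each DS opcode the encodings below are checked at the default
+// (zero) offset, at the maximum 16-bit offset (65535), with an explicit
+// offset:0 (printed with no offset), and at offset:4 with the
+// highest-numbered register operands.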
+ds_nop
+// GFX1250: ds_nop ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+
+ds_add_f32 v1, v2
+// GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:65535
+// GFX1250: ds_add_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:0
+// GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v255, v255 offset:4
+// GFX1250: ds_add_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00]
+
+ds_add_rtn_f32 v5, v1, v2
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_add_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff]
+
+ds_add_rtn_u32 v5, v1, v2
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_add_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_add_u32 v1, v2
+// GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:65535
+// GFX1250: ds_add_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:0
+// GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v255, v255 offset:4
+// GFX1250: ds_add_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00]
+
+ds_add_u64 v1, v[2:3]
+// GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_add_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:0
+// GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v255, v[254:255] offset:4
+// GFX1250: ds_add_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_and_b32 v1, v2
+// GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535
+// GFX1250: ds_and_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:0
+// GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v255, v255 offset:4
+// GFX1250: ds_and_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00]
+
+ds_and_b64 v1, v[2:3]
+// GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_and_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:0
+// GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v255, v[254:255] offset:4
+// GFX1250: ds_and_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_and_rtn_b32 v5, v1, v2
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_and_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_append v5
+// GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:65535
+// GFX1250: ds_append v5 offset:65535 ; encoding: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:0
+// GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v255 offset:4
+// GFX1250: ds_append v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff]
+
+ds_bpermute_b32 v5, v1, v2
+// GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_bpermute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:0
+// GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v255, v255, v255 offset:4
+// GFX1250: ds_bpermute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff]
+
+ds_cmpstore_b32 v1, v2, v3
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v1, v2, v3 offset:65535
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v1, v2, v3 offset:0
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v255, v255, v255 offset:4
+// GFX1250: ds_cmpstore_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:0
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4
+// GFX1250: ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5]
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_consume v5
+// GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:65535
+// GFX1250: ds_consume v5 offset:65535 ; encoding: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:0
+// GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v255 offset:4
+// GFX1250: ds_consume v255 offset:4 ; encoding: [0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff]
+
+ds_dec_rtn_u32 v5, v1, v2
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_dec_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_dec_u32 v1, v2
+// GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535
+// GFX1250: ds_dec_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:0
+// GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v255, v255 offset:4
+// GFX1250: ds_dec_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3]
+// GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_dec_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:0
+// GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v255, v[254:255] offset:4
+// GFX1250: ds_dec_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_inc_rtn_u32 v5, v1, v2
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_inc_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_inc_u32 v1, v2
+// GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535
+// GFX1250: ds_inc_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:0
+// GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v255, v255 offset:4
+// GFX1250: ds_inc_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3]
+// GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_inc_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:0
+// GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v255, v[254:255] offset:4
+// GFX1250: ds_inc_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_load_2addr_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_load_2addr_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_load_2addr_stride64_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_load_2addr_stride64_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_load_addtid_b32 v5
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v5 offset:65535
+// GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v5 offset:0
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v255 offset:4
+// GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+
+ds_load_b128 v[6:9], v1
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[6:9], v1 offset:65535
+// GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[6:9], v1 offset:0
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[252:255], v255 offset:4
+// GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_load_b32 v5, v1
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v5, v1 offset:65535
+// GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v5, v1 offset:0
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v255, v255 offset:4
+// GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_b64 v[6:7], v1
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[6:7], v1 offset:65535
+// GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[6:7], v1 offset:0
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[254:255], v255 offset:4
+// GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+
+ds_load_b96 v[6:8], v1
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[6:8], v1 offset:65535
+// GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[6:8], v1 offset:0
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[252:254], v255 offset:4
+// GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_load_i16 v5, v1
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v5, v1 offset:65535
+// GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v5, v1 offset:0
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v255, v255 offset:4
+// GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_i8 v5, v1
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v5, v1 offset:65535
+// GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v5, v1 offset:0
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v255, v255 offset:4
+// GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_i8_d16 v5, v1
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v5, v1 offset:0
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v255, v255 offset:4
+// GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_i8_d16_hi v5, v1
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u16 v5, v1
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v5, v1 offset:65535
+// GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v5, v1 offset:0
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v255, v255 offset:4
+// GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_u16_d16 v5, v1
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v5, v1 offset:0
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v255, v255 offset:4
+// GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u16_d16_hi v5, v1
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u8 v5, v1
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v5, v1 offset:65535
+// GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v5, v1 offset:0
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v255, v255 offset:4
+// GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_u8_d16 v5, v1
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v5, v1 offset:0
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v255, v255 offset:4
+// GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u8_d16_hi v5, v1
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+
+ds_max_num_f32 v1, v2
+// GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v1, v2 offset:65535
+// GFX1250: ds_max_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v1, v2 offset:0
+// GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v255, v255 offset:4
+// GFX1250: ds_max_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3]
+// GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3] offset:0
+// GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v255, v[254:255] offset:4
+// GFX1250: ds_max_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_max_i32 v1, v2
+// GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535
+// GFX1250: ds_max_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:0
+// GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v255, v255 offset:4
+// GFX1250: ds_max_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_i64 v1, v[2:3]
+// GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:0
+// GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v255, v[254:255] offset:4
+// GFX1250: ds_max_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_max_num_rtn_f32 v5, v1, v2
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_max_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_rtn_i32 v5, v1, v2
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:0
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v255, v255, v255 offset:4
+// GFX1250: ds_max_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_rtn_u32 v5, v1, v2
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_max_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_u32 v1, v2
+// GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535
+// GFX1250: ds_max_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:0
+// GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v255, v255 offset:4
+// GFX1250: ds_max_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_u64 v1, v[2:3]
+// GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:0
+// GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v255, v[254:255] offset:4
+// GFX1250: ds_max_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_num_f32 v1, v2
+// GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v1, v2 offset:65535
+// GFX1250: ds_min_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v1, v2 offset:0
+// GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v255, v255 offset:4
+// GFX1250: ds_min_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3]
+// GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3] offset:0
+// GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v255, v[254:255] offset:4
+// GFX1250: ds_min_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_i32 v1, v2
+// GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535
+// GFX1250: ds_min_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:0
+// GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v255, v255 offset:4
+// GFX1250: ds_min_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_i64 v1, v[2:3]
+// GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:0
+// GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v255, v[254:255] offset:4
+// GFX1250: ds_min_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_num_rtn_f32 v5, v1, v2
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_min_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_rtn_i32 v5, v1, v2
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:0
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v255, v255, v255 offset:4
+// GFX1250: ds_min_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_rtn_u32 v5, v1, v2
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_min_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_u32 v1, v2
+// GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535
+// GFX1250: ds_min_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:0
+// GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v255, v255 offset:4
+// GFX1250: ds_min_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_u64 v1, v[2:3]
+// GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:0
+// GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v255, v[254:255] offset:4
+// GFX1250: ds_min_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_mskor_b32 v1, v2, v3
+// GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535
+// GFX1250: ds_mskor_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:0
+// GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v255, v255, v255 offset:4
+// GFX1250: ds_mskor_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_mskor_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:0
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4
+// GFX1250: ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5]
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe]
+
+ds_or_b32 v1, v2
+// GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535
+// GFX1250: ds_or_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:0
+// GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v255, v255 offset:4
+// GFX1250: ds_or_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00]
+
+ds_or_b64 v1, v[2:3]
+// GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_or_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:0
+// GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v255, v[254:255] offset:4
+// GFX1250: ds_or_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_or_rtn_b32 v5, v1, v2
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_or_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_permute_b32 v5, v1, v2
+// GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_permute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:0
+// GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v255, v255, v255 offset:4
+// GFX1250: ds_permute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_f16 v2, v1
+// GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:0
+// GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:4660
+// GFX1250: ds_pk_add_f16 v2, v1 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:65535
+// GFX1250: ds_pk_add_f16 v2, v1 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v255, v255
+// GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:0
+// GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:4660
+// GFX1250: ds_pk_add_f16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:65535
+// GFX1250: ds_pk_add_f16 v255, v255 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v0, v0
+// GFX1250: ds_pk_add_f16 v0, v0 ; encoding: [0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_bf16 v2, v1
+// GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_bf16 v2, v1 offset:0
+// GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_bf16 v255, v255
+// GFX1250: ds_pk_add_bf16 v255, v255 ; encoding: [0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_bf16 v255, v255 offset:4660
+// GFX1250: ds_pk_add_bf16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_bf16 v0, v0
+// GFX1250: ds_pk_add_bf16 v0, v0 ; encoding: [0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_bf16 v0, v0 offset:65535
+// GFX1250: ds_pk_add_bf16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_rtn_f16 v3, v2, v1
+// GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 ; encoding: [0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_f16 v3, v2, v1 offset:4660
+// GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_f16 v255, v0, v200
+// GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 ; encoding: [0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_f16 v255, v0, v200 offset:65535
+// GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 offset:65535 ; encoding: [0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_f16 v255, v255, v255
+// GFX1250: ds_pk_add_rtn_f16 v255, v255, v255 ; encoding: [0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v3, v2, v1
+// GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 ; encoding: [0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660
+// GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_bf16 v255, v0, v200
+// GFX1250: ds_pk_add_rtn_bf16 v255, v0, v200 ; encoding: [0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v255, v255, v255
+// GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 ; encoding: [0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535
+// GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535 ; encoding: [0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff]
+
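+// The legacy ds_read* mnemonics below are accepted as aliases on GFX1250 and
+// disassemble to the canonical ds_load* forms, as the GFX1250 check lines show;
+// the encodings are identical to the ds_load* tests above.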
+ds_read2_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_read2_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_read2st64_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_read2st64_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_read_addtid_b32 v5
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v5 offset:65535
+// GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v5 offset:0
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v255 offset:4
+// GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+
+ds_read_b128 v[6:9], v1
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[6:9], v1 offset:65535
+// GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[6:9], v1 offset:0
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[252:255], v255 offset:4
+// GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_read_b32 v5, v1
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535
+// GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:0
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v255, v255 offset:4
+// GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_b64 v[6:7], v1
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[6:7], v1 offset:65535
+// GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[6:7], v1 offset:0
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[254:255], v255 offset:4
+// GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+
+ds_read_b96 v[6:8], v1
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[6:8], v1 offset:65535
+// GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[6:8], v1 offset:0
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[252:254], v255 offset:4
+// GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_read_i16 v5, v1
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535
+// GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:0
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v255, v255 offset:4
+// GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_i8 v5, v1
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535
+// GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:0
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v255, v255 offset:4
+// GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_i8_d16 v5, v1
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v5, v1 offset:0
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v255, v255 offset:4
+// GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_i8_d16_hi v5, v1
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u16 v5, v1
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535
+// GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:0
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v255, v255 offset:4
+// GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_u16_d16 v5, v1
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v5, v1 offset:0
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v255, v255 offset:4
+// GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u16_d16_hi v5, v1
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u8 v5, v1
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535
+// GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:0
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v255, v255 offset:4
+// GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_u8_d16 v5, v1
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v5, v1 offset:0
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v255, v255 offset:4
+// GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u8_d16_hi v5, v1
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+
+ds_rsub_rtn_u32 v5, v1, v2
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_rsub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_rsub_u32 v1, v2
+// GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535
+// GFX1250: ds_rsub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:0
+// GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v255, v255 offset:4
+// GFX1250: ds_rsub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3]
+// GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_rsub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:0
+// GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v255, v[254:255] offset:4
+// GFX1250: ds_rsub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_store_2addr_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_store_addtid_b32 v1
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v1 offset:65535
+// GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v1 offset:0
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v255 offset:4
+// GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+
+ds_store_b128 v1, v[2:5]
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v1, v[2:5] offset:65535
+// GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v1, v[2:5] offset:0
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v255, v[252:255] offset:4
+// GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_store_b16 v1, v2
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v1, v2 offset:65535
+// GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v1, v2 offset:0
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v255, v255 offset:4
+// GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+
+ds_store_b32 v1, v2
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v1, v2 offset:65535
+// GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v1, v2 offset:0
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v255, v255 offset:4
+// GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b64 v1, v[2:3]
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v1, v[2:3] offset:0
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v255, v[254:255] offset:4
+// GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_store_b8 v1, v2
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v1, v2 offset:65535
+// GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v1, v2 offset:0
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v255, v255 offset:4
+// GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+
+ds_store_b96 v1, v[2:4]
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v1, v[2:4] offset:65535
+// GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v1, v[2:4] offset:0
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v255, v[252:254] offset:4
+// GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_storexchg_rtn_b32 v5, v1, v2
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_cond_sub_rtn_u32 v5, v1, v2
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_u32 v1, v2
+// GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_cond_sub_u32 v1, v2 offset:65535
+// GFX1250: ds_cond_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_cond_sub_u32 v1, v2 offset:0
+// GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff]
+
+ds_sub_clamp_u32 v1, v2
+// GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v1, v2 offset:65535
+// GFX1250: ds_sub_clamp_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v1, v2 offset:0
+// GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v255, v255 offset:4
+// GFX1250: ds_sub_clamp_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00]
+
+ds_sub_rtn_u32 v5, v1, v2
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_sub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_sub_u32 v1, v2
+// GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535
+// GFX1250: ds_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:0
+// GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v255, v255 offset:4
+// GFX1250: ds_sub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3]
+// GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_sub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:0
+// GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v255, v[254:255] offset:4
+// GFX1250: ds_sub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_write2_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_write2st64_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_write_addtid_b32 v1
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v1 offset:65535
+// GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v1 offset:0
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v255 offset:4
+// GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+
+ds_write_b128 v1, v[2:5]
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v1, v[2:5] offset:65535
+// GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v1, v[2:5] offset:0
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v255, v[252:255] offset:4
+// GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_write_b16 v1, v2
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535
+// GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:0
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v255, v255 offset:4
+// GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+
+ds_write_b32 v1, v2
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:65535
+// GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:0
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v255, v255 offset:4
+// GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b64 v1, v[2:3]
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:0
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v255, v[254:255] offset:4
+// GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_write_b8 v1, v2
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535
+// GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:0
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v255, v255 offset:4
+// GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+
+ds_write_b96 v1, v[2:4]
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v1, v[2:4] offset:65535
+// GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v1, v[2:4] offset:0
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v255, v[252:254] offset:4
+// GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_wrxchg_rtn_b32 v5, v1, v2
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_xor_b32 v1, v2
+// GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535
+// GFX1250: ds_xor_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:0
+// GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v255, v255 offset:4
+// GFX1250: ds_xor_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3]
+// GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_xor_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:0
+// GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v255, v[254:255] offset:4
+// GFX1250: ds_xor_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_xor_rtn_b32 v5, v1, v2
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_xor_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe]
+
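+// ds_swizzle_b32 offsets round-trip through their symbolic swizzle() form
+// where one applies: a raw offset:0xFFFF disassembles as swizzle(FFT,31),
+// and explicit swizzle() modes are printed back in canonical spacing.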
+ds_swizzle_b32 v8, v2
+// GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:0
+// GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:0xFFFF
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(FFT,31) ; encoding: [0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM,0,1,2,3) ; encoding: [0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16) ; encoding: [0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8) ; encoding: [0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1) ; encoding: [0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7) ; encoding: [0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM, "01pip")
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM,"01pip") ; encoding: [0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
ds_atomic_async_barrier_arrive_b64 v1 offset:65407
// GFX1250: ds_atomic_async_barrier_arrive_b64 v1 offset:65407 ; encoding: [0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_features.s b/llvm/test/MC/AMDGPU/gfx1250_asm_features.s
new file mode 100644
index 0000000..013b790
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_features.s
@@ -0,0 +1,32 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefixes=GFX1250 %s
+
+//
+// Elements of the CPol operand can be given in any order
+//
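+// (Each group of three permutations below must assemble to one identical
+// encoding; only the order of the th:, scope: and nv modifiers differs.)
+//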
+
+s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+s_load_b32 s4, s[2:3], 10 nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+global_load_b32 v0, v[2:3], off th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
+
+global_load_b32 v0, v[2:3], off scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
+
+global_load_b32 v0, v[2:3], off nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s b/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s
new file mode 100644
index 0000000..100fc98
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s
@@ -0,0 +1,54 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefixes=GFX1200-ERR %s
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
+
+s_mov_b32 s0, src_flat_scratch_base_lo
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: src_flat_scratch_base_lo register not available on this GPU
+// GFX1250: encoding: [0xe6,0x00,0x80,0xbe]
+
+s_mov_b32 s0, src_flat_scratch_base_hi
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: src_flat_scratch_base_hi register not available on this GPU
+// GFX1250: encoding: [0xe7,0x00,0x80,0xbe]
+
+s_mov_b64 s[0:1], src_flat_scratch_base_lo
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: src_flat_scratch_base_lo register not available on this GPU
+// GFX1250: encoding: [0xe6,0x01,0x80,0xbe]
+
+s_mov_b64 s[0:1], shared_base
+// GFX1250: encoding: [0xeb,0x01,0x80,0xbe]
+
+s_mov_b64 s[0:1], src_shared_base
+// GFX1250: encoding: [0xeb,0x01,0x80,0xbe]
+
+s_mov_b64 s[0:1], shared_limit
+// GFX1250: encoding: [0xec,0x01,0x80,0xbe]
+
+s_mov_b64 s[0:1], src_shared_limit
+// GFX1250: encoding: [0xec,0x01,0x80,0xbe]
+
+s_getreg_b32 s1, hwreg(33)
+// GFX1250: encoding: [0x21,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(HW_REG_XNACK_STATE_PRIV)
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x21,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(34)
+// GFX1250: encoding: [0x22,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(HW_REG_XNACK_MASK)
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x22,0xf8,0x81,0xb8]
+
+s_setreg_b32 hwreg(33), s1
+// GFX1250: encoding: [0x21,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(HW_REG_XNACK_STATE_PRIV), s1
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x21,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(34), s1
+// GFX1250: encoding: [0x22,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(HW_REG_XNACK_MASK), s1
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x22,0xf8,0x01,0xb9]
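+
+// Note: hwreg(33)/hwreg(34) and the symbolic HW_REG_XNACK_STATE_PRIV /
+// HW_REG_XNACK_MASK forms produce identical GFX1250 encodings; the GFX1200
+// error checks above cover only the symbolic names.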
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
index 41b6e93..aab8d9a 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
@@ -45,6 +45,10 @@ s_rfe_i64 s[2:3]
s_rfe_b64 s[2:3]
// GFX1250: s_rfe_i64 s[2:3] ; encoding: [0x02,0x4a,0x80,0xbe]
+s_get_shader_cycles_u64 s[2:3]
+// GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
s_barrier_signal -3
// GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s b/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
index 89bd507..7681a32 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
@@ -97,6 +97,20 @@ v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+;; *xf32
+
+v_mfma_f32_16x16x8_xf32 a[0:3], v[2:3], v[4:5], a[2:5]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_16x16x8xf32 a[0:3], v[2:3], v[4:5], a[2:5]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_32x32x4_xf32 a[0:15], v[2:3], v[4:5], a[18:33]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_32x32x4xf32 a[0:15], v[2:3], v[4:5], a[18:33]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
;; Export, S_WAIT_EXPCNT and S_WAIT_EVENT
export mrt0 off, off, off, off
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
index 7a4da25..0b8f190 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
@@ -1,6 +1,2310 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
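+
+// A zero offset is canonical: "offset:0" assembles and disassembles the same
+// as omitting the offset, so both spellings check against one printed form.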
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
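+// Store opcodes (b8 through b128, plus the d16_hi variants) repeat the same
+// operand/offset sweep as the loads; their cache-policy lines use the
+// TH_STORE_* hints in place of TH_LOAD_*.
+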
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b32 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
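+// Atomic opcodes follow. th:TH_ATOMIC_RETURN marks the atomic as returning
+// the pre-op value, visible as encoding byte 7 changing from 0x80 to 0x90;
+// the scope: suffixes adjust the same byte.
+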
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
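+// From here on, th:TH_ATOMIC_RT_RETURN is also accepted on input; it
+// assembles to the same encoding as th:TH_ATOMIC_RETURN and is printed in
+// that canonical form.
+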
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
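+// The sub_clamp_u32 group folds the cache-policy variants into the operand
+// sweep, so every line carries a th: hint rather than testing the hints
+// separately.
+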
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+
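+// Note: an explicit offset:0 assembles to the same encoding as omitting the
+// offset, and the canonical printing in the check lines drops the zero offset.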
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv
// GFX1250: buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv ; encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: nv is not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
index c5bd00c..e879432 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
@@ -5,6 +5,76 @@ v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_minimum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_minimum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_maximum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_maximum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ldexp_f64 v[4:5], v[2:3], v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_ldexp_f64 v[4:5], v[2:3], v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_lo_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_lo_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_i32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_i32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_lshrrev_b64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_lshrrev_b64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashrrev_i64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_ashrrev_i64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX125X-ERR-NEXT:{{^}}v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
@@ -42,9 +112,94 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_minimum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_minimum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_maximum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_maximum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ldexp_f64 v[4:5], v[2:3], v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_ldexp_f64 v[4:5], v[2:3], v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_lo_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_lo_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_i32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_i32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_lshrrev_b64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_lshrrev_b64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashrrev_i64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_ashrrev_i64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
v_mad_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
@@ -87,6 +242,11 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_trig_preop_f64 v[4:5], v[8:9], v2 row_share:1
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_trig_preop_f64 v[4:5], v[8:9], v2 row_share:1
+// GFX125X-ERR-NEXT:{{^}} ^
+
v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
@@ -161,3 +321,8 @@ v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk16_bf16_bf6 v[10:17], s[20:22], 0xcf00
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk16_bf16_bf6 v[10:17], s[20:22], 0xcf00
+// GFX125X-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s
new file mode 100644
index 0000000..157b4d6
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s
@@ -0,0 +1,13 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s
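+// Check that the e64_dpp forms of v_fmaak/v_fmamk, which embed a literal
+// constant, are rejected.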
+
+v_fmaak_f32_e64_dpp v4, v2, v6, 3 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmamk_f32_e64_dpp v4, v2, 3, v6 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmaak_f16_e64_dpp v4, v2, v6, 3 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmamk_f16_e64_dpp v4, v2, 3, v6 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s
new file mode 100644
index 0000000..4aea7b3
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s
@@ -0,0 +1,3413 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
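+//
+// Exercises the VOP3-encoded v_cmpx (VOP3CX) compare instructions, cycling
+// each source operand class: VGPRs, SGPRs, ttmps, inline constants, and
+// literals.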
+
+v_cmpx_class_f16_e64 v1, v2
+// GFX1250: v_cmpx_class_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_class_f16_e64 v255, v2
+// GFX1250: v_cmpx_class_f16_e64 v255, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00]
+
+v_cmpx_class_f16_e64 s1, v2
+// GFX1250: v_cmpx_class_f16_e64 s1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00]
+
+v_cmpx_class_f16_e64 s105, v255
+// GFX1250: v_cmpx_class_f16_e64 s105, v255 ; encoding: [0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00]
+
+v_cmpx_class_f16_e64 vcc_lo, s2
+// GFX1250: v_cmpx_class_f16_e64 vcc_lo, s2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00]
+
+v_cmpx_class_f16_e64 vcc_hi, s105
+// GFX1250: v_cmpx_class_f16_e64 vcc_hi, s105 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00]
+
+v_cmpx_class_f16_e64 ttmp15, ttmp15
+// GFX1250: v_cmpx_class_f16_e64 ttmp15, ttmp15 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00]
+
+v_cmpx_class_f16_e64 m0, src_scc
+// GFX1250: v_cmpx_class_f16_e64 m0, src_scc ; encoding: [0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00]
+
+v_cmpx_class_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_class_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_class_f16_e64 exec_hi, null
+// GFX1250: v_cmpx_class_f16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_class_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_class_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_class_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_class_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_class_f16_e64 0.5, m0
+// GFX1250: v_cmpx_class_f16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_class_f16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_class_f16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi
+// GFX1250: v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi ; encoding: [0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_class_f32_e64 v1, v2
+// GFX1250: v_cmpx_class_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_class_f32_e64 v255, v255
+// GFX1250: v_cmpx_class_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_class_f32_e64 s1, s2
+// GFX1250: v_cmpx_class_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_class_f32_e64 s105, s105
+// GFX1250: v_cmpx_class_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_class_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_class_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_class_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_class_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_class_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_class_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_class_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_class_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_class_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_class_f32_e64 exec_hi, null
+// GFX1250: v_cmpx_class_f32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_class_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_class_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_class_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_class_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_class_f32_e64 0.5, m0
+// GFX1250: v_cmpx_class_f32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_class_f32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_class_f32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi
+// GFX1250: v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi ; encoding: [0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f64_e64 v[2:3], v2
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], v2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], v255
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], v255 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], s2
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], s2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], s105
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], s105 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00]
+
+v_cmpx_class_f64_e64 v[254:255], ttmp15
+// GFX1250: v_cmpx_class_f64_e64 v[254:255], ttmp15 ; encoding: [0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00]
+
+v_cmpx_class_f64_e64 s[2:3], vcc_hi
+// GFX1250: v_cmpx_class_f64_e64 s[2:3], vcc_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00]
+
+v_cmpx_class_f64_e64 s[104:105], vcc_lo
+// GFX1250: v_cmpx_class_f64_e64 s[104:105], vcc_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00]
+
+v_cmpx_class_f64_e64 vcc, m0
+// GFX1250: v_cmpx_class_f64_e64 vcc, m0 ; encoding: [0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00]
+
+v_cmpx_class_f64_e64 ttmp[14:15], exec_hi
+// GFX1250: v_cmpx_class_f64_e64 ttmp[14:15], exec_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00]
+
+v_cmpx_class_f64_e64 exec, exec_lo
+// GFX1250: v_cmpx_class_f64_e64 exec, exec_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00]
+
+v_cmpx_class_f64_e64 null, null
+// GFX1250: v_cmpx_class_f64_e64 null, null ; encoding: [0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00]
+
+v_cmpx_class_f64_e64 -1, -1
+// GFX1250: v_cmpx_class_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_class_f64_e64 0.5, 0.5
+// GFX1250: v_cmpx_class_f64_e64 0.5, 0.5 ; encoding: [0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00]
+
+v_cmpx_class_f64_e64 -|src_scc|, src_scc
+// GFX1250: v_cmpx_class_f64_e64 -|src_scc|, src_scc ; encoding: [0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20]
+
+v_cmpx_class_f64_e64 0xaf123456, 0xaf123456
+// GFX1250: v_cmpx_class_f64_e64 0xaf123456, 0xaf123456 ; encoding: [0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f16_e64 v1, v2
+// GFX1250: v_cmpx_eq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_f16_e64 v255, v255
+// GFX1250: v_cmpx_eq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_f16_e64 s1, s2
+// GFX1250: v_cmpx_eq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s105, s105
+// GFX1250: v_cmpx_eq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_eq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_eq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_eq_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_eq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 v1, v2
+// GFX1250: v_cmpx_eq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_f32_e64 v255, v255
+// GFX1250: v_cmpx_eq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_f32_e64 s1, s2
+// GFX1250: v_cmpx_eq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s105, s105
+// GFX1250: v_cmpx_eq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_eq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_eq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_eq_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_eq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_eq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_eq_f64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_f64_e64 -1, -1
+// GFX1250: v_cmpx_eq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_f64_e64 0.5, null
+// GFX1250: v_cmpx_eq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_eq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i16_e64 v1, v2
+// GFX1250: v_cmpx_eq_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_i16_e64 v255, v255
+// GFX1250: v_cmpx_eq_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_i16_e64 s1, s2
+// GFX1250: v_cmpx_eq_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s105, s105
+// GFX1250: v_cmpx_eq_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16_e64 0.5, m0
+// GFX1250: v_cmpx_eq_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb2,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_eq_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 v1, v2
+// GFX1250: v_cmpx_eq_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_i32_e64 v255, v255
+// GFX1250: v_cmpx_eq_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_i32_e64 s1, s2
+// GFX1250: v_cmpx_eq_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s105, s105
+// GFX1250: v_cmpx_eq_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 0.5, m0
+// GFX1250: v_cmpx_eq_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_eq_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd2,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_eq_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_eq_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_eq_i64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_i64_e64 -1, -1
+// GFX1250: v_cmpx_eq_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_i64_e64 0.5, null
+// GFX1250: v_cmpx_eq_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_eq_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_eq_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_eq_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_eq_u16_e64 v1, v2
+// GFX1250: v_cmpx_eq_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_u16_e64 v255, v255
+// GFX1250: v_cmpx_eq_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_u16_e64 s1, s2
+// GFX1250: v_cmpx_eq_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s105, s105
+// GFX1250: v_cmpx_eq_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xba,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16_e64 0.5, m0
+// GFX1250: v_cmpx_eq_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xba,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_eq_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 v1, v2
+// GFX1250: v_cmpx_eq_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_u32_e64 v255, v255
+// GFX1250: v_cmpx_eq_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_u32_e64 s1, s2
+// GFX1250: v_cmpx_eq_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s105, s105
+// GFX1250: v_cmpx_eq_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 0.5, m0
+// GFX1250: v_cmpx_eq_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_eq_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xda,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_eq_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_eq_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_eq_u64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_u64_e64 -1, -1
+// GFX1250: v_cmpx_eq_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_u64_e64 0.5, null
+// GFX1250: v_cmpx_eq_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_eq_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_eq_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_eq_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_f16_e64 v1, v2
+// GFX1250: v_cmpx_ge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_f16_e64 v255, v255
+// GFX1250: v_cmpx_ge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_f16_e64 s1, s2
+// GFX1250: v_cmpx_ge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s105, s105
+// GFX1250: v_cmpx_ge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_ge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ge_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 v1, v2
+// GFX1250: v_cmpx_ge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_f32_e64 v255, v255
+// GFX1250: v_cmpx_ge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_f32_e64 s1, s2
+// GFX1250: v_cmpx_ge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s105, s105
+// GFX1250: v_cmpx_ge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_ge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ge_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_ge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_ge_f64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_f64_e64 -1, -1
+// GFX1250: v_cmpx_ge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_f64_e64 0.5, null
+// GFX1250: v_cmpx_ge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_ge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i16_e64 v1, v2
+// GFX1250: v_cmpx_ge_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_i16_e64 v255, v255
+// GFX1250: v_cmpx_ge_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_i16_e64 s1, s2
+// GFX1250: v_cmpx_ge_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s105, s105
+// GFX1250: v_cmpx_ge_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16_e64 0.5, m0
+// GFX1250: v_cmpx_ge_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb6,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ge_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 v1, v2
+// GFX1250: v_cmpx_ge_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_i32_e64 v255, v255
+// GFX1250: v_cmpx_ge_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_i32_e64 s1, s2
+// GFX1250: v_cmpx_ge_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s105, s105
+// GFX1250: v_cmpx_ge_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 0.5, m0
+// GFX1250: v_cmpx_ge_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ge_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd6,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_ge_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ge_i64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_i64_e64 -1, -1
+// GFX1250: v_cmpx_ge_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_i64_e64 0.5, null
+// GFX1250: v_cmpx_ge_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_ge_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ge_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ge_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_u16_e64 v1, v2
+// GFX1250: v_cmpx_ge_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_u16_e64 v255, v255
+// GFX1250: v_cmpx_ge_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_u16_e64 s1, s2
+// GFX1250: v_cmpx_ge_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s105, s105
+// GFX1250: v_cmpx_ge_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16_e64 0.5, m0
+// GFX1250: v_cmpx_ge_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbe,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ge_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 v1, v2
+// GFX1250: v_cmpx_ge_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_u32_e64 v255, v255
+// GFX1250: v_cmpx_ge_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_u32_e64 s1, s2
+// GFX1250: v_cmpx_ge_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s105, s105
+// GFX1250: v_cmpx_ge_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 0.5, m0
+// GFX1250: v_cmpx_ge_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ge_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xde,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_ge_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ge_u64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_u64_e64 -1, -1
+// GFX1250: v_cmpx_ge_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_u64_e64 0.5, null
+// GFX1250: v_cmpx_ge_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_ge_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ge_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ge_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_f16_e64 v1, v2
+// GFX1250: v_cmpx_gt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_f16_e64 v255, v255
+// GFX1250: v_cmpx_gt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_f16_e64 s1, s2
+// GFX1250: v_cmpx_gt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s105, s105
+// GFX1250: v_cmpx_gt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_gt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_gt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_gt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_gt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 v1, v2
+// GFX1250: v_cmpx_gt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_f32_e64 v255, v255
+// GFX1250: v_cmpx_gt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_f32_e64 s1, s2
+// GFX1250: v_cmpx_gt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s105, s105
+// GFX1250: v_cmpx_gt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_gt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_gt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_gt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_gt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_gt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_gt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_f64_e64 -1, -1
+// GFX1250: v_cmpx_gt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_f64_e64 0.5, null
+// GFX1250: v_cmpx_gt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_gt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i16_e64 v1, v2
+// GFX1250: v_cmpx_gt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_i16_e64 v255, v255
+// GFX1250: v_cmpx_gt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_i16_e64 s1, s2
+// GFX1250: v_cmpx_gt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s105, s105
+// GFX1250: v_cmpx_gt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16_e64 0.5, m0
+// GFX1250: v_cmpx_gt_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb4,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_gt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 v1, v2
+// GFX1250: v_cmpx_gt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_i32_e64 v255, v255
+// GFX1250: v_cmpx_gt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_i32_e64 s1, s2
+// GFX1250: v_cmpx_gt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s105, s105
+// GFX1250: v_cmpx_gt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 0.5, m0
+// GFX1250: v_cmpx_gt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_gt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd4,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_gt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_gt_i64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_i64_e64 -1, -1
+// GFX1250: v_cmpx_gt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_i64_e64 0.5, null
+// GFX1250: v_cmpx_gt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_gt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_gt_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_gt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_u16_e64 v1, v2
+// GFX1250: v_cmpx_gt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_u16_e64 v255, v255
+// GFX1250: v_cmpx_gt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_u16_e64 s1, s2
+// GFX1250: v_cmpx_gt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s105, s105
+// GFX1250: v_cmpx_gt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16_e64 0.5, m0
+// GFX1250: v_cmpx_gt_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbc,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_gt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 v1, v2
+// GFX1250: v_cmpx_gt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_u32_e64 v255, v255
+// GFX1250: v_cmpx_gt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_u32_e64 s1, s2
+// GFX1250: v_cmpx_gt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s105, s105
+// GFX1250: v_cmpx_gt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 0.5, m0
+// GFX1250: v_cmpx_gt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_gt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdc,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_gt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_gt_u64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_u64_e64 -1, -1
+// GFX1250: v_cmpx_gt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_u64_e64 0.5, null
+// GFX1250: v_cmpx_gt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_gt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_gt_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_gt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_f16_e64 v1, v2
+// GFX1250: v_cmpx_le_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_f16_e64 v255, v255
+// GFX1250: v_cmpx_le_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_f16_e64 s1, s2
+// GFX1250: v_cmpx_le_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s105, s105
+// GFX1250: v_cmpx_le_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_le_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_le_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_le_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_le_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 v1, v2
+// GFX1250: v_cmpx_le_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_f32_e64 v255, v255
+// GFX1250: v_cmpx_le_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_f32_e64 s1, s2
+// GFX1250: v_cmpx_le_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s105, s105
+// GFX1250: v_cmpx_le_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_le_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_le_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_le_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_le_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_le_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_le_f64_e64 null, 0.5
+// GFX1250: v_cmpx_le_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_f64_e64 -1, -1
+// GFX1250: v_cmpx_le_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_f64_e64 0.5, null
+// GFX1250: v_cmpx_le_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_le_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i16_e64 v1, v2
+// GFX1250: v_cmpx_le_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_i16_e64 v255, v255
+// GFX1250: v_cmpx_le_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_i16_e64 s1, s2
+// GFX1250: v_cmpx_le_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s105, s105
+// GFX1250: v_cmpx_le_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_le_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_i16_e64 0.5, m0
+// GFX1250: v_cmpx_le_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb3,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_le_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 v1, v2
+// GFX1250: v_cmpx_le_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_i32_e64 v255, v255
+// GFX1250: v_cmpx_le_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_i32_e64 s1, s2
+// GFX1250: v_cmpx_le_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s105, s105
+// GFX1250: v_cmpx_le_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_le_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 0.5, m0
+// GFX1250: v_cmpx_le_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_le_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd3,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_le_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_le_i64_e64 null, 0.5
+// GFX1250: v_cmpx_le_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_i64_e64 -1, -1
+// GFX1250: v_cmpx_le_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_i64_e64 0.5, null
+// GFX1250: v_cmpx_le_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_le_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_le_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_le_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_u16_e64 v1, v2
+// GFX1250: v_cmpx_le_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_u16_e64 v255, v255
+// GFX1250: v_cmpx_le_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_u16_e64 s1, s2
+// GFX1250: v_cmpx_le_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s105, s105
+// GFX1250: v_cmpx_le_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_le_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_u16_e64 0.5, m0
+// GFX1250: v_cmpx_le_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbb,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_le_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 v1, v2
+// GFX1250: v_cmpx_le_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_u32_e64 v255, v255
+// GFX1250: v_cmpx_le_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_u32_e64 s1, s2
+// GFX1250: v_cmpx_le_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s105, s105
+// GFX1250: v_cmpx_le_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_le_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 0.5, m0
+// GFX1250: v_cmpx_le_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_le_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdb,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_le_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_le_u64_e64 null, 0.5
+// GFX1250: v_cmpx_le_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_u64_e64 -1, -1
+// GFX1250: v_cmpx_le_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_u64_e64 0.5, null
+// GFX1250: v_cmpx_le_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_le_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_le_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_le_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lg_f16_e64 v1, v2
+// GFX1250: v_cmpx_lg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lg_f16_e64 v255, v255
+// GFX1250: v_cmpx_lg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lg_f16_e64 s1, s2
+// GFX1250: v_cmpx_lg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s105, s105
+// GFX1250: v_cmpx_lg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lg_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lg_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_lg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lg_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lg_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lg_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_lg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lg_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_lg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lg_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 v1, v2
+// GFX1250: v_cmpx_lg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lg_f32_e64 v255, v255
+// GFX1250: v_cmpx_lg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lg_f32_e64 s1, s2
+// GFX1250: v_cmpx_lg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s105, s105
+// GFX1250: v_cmpx_lg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lg_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_lg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lg_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lg_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_lg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_lg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lg_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lg_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lg_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lg_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_lg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_lg_f64_e64 null, 0.5
+// GFX1250: v_cmpx_lg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lg_f64_e64 -1, -1
+// GFX1250: v_cmpx_lg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lg_f64_e64 0.5, null
+// GFX1250: v_cmpx_lg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lg_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_lg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f16_e64 v1, v2
+// GFX1250: v_cmpx_lt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_f16_e64 v255, v255
+// GFX1250: v_cmpx_lt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_f16_e64 s1, s2
+// GFX1250: v_cmpx_lt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s105, s105
+// GFX1250: v_cmpx_lt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_lt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 v1, v2
+// GFX1250: v_cmpx_lt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_f32_e64 v255, v255
+// GFX1250: v_cmpx_lt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_f32_e64 s1, s2
+// GFX1250: v_cmpx_lt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s105, s105
+// GFX1250: v_cmpx_lt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_lt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_lt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_lt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_f64_e64 -1, -1
+// GFX1250: v_cmpx_lt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_f64_e64 0.5, null
+// GFX1250: v_cmpx_lt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_lt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i16_e64 v1, v2
+// GFX1250: v_cmpx_lt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_i16_e64 v255, v255
+// GFX1250: v_cmpx_lt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_i16_e64 s1, s2
+// GFX1250: v_cmpx_lt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s105, s105
+// GFX1250: v_cmpx_lt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16_e64 0.5, m0
+// GFX1250: v_cmpx_lt_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb1,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_lt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 v1, v2
+// GFX1250: v_cmpx_lt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_i32_e64 v255, v255
+// GFX1250: v_cmpx_lt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_i32_e64 s1, s2
+// GFX1250: v_cmpx_lt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s105, s105
+// GFX1250: v_cmpx_lt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 0.5, m0
+// GFX1250: v_cmpx_lt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_lt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lt_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd1,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_lt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_lt_i64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_i64_e64 -1, -1
+// GFX1250: v_cmpx_lt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_i64_e64 0.5, null
+// GFX1250: v_cmpx_lt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_lt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_lt_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_lt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_u16_e64 v1, v2
+// GFX1250: v_cmpx_lt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_u16_e64 v255, v255
+// GFX1250: v_cmpx_lt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_u16_e64 s1, s2
+// GFX1250: v_cmpx_lt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s105, s105
+// GFX1250: v_cmpx_lt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16_e64 0.5, m0
+// GFX1250: v_cmpx_lt_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb9,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_lt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 v1, v2
+// GFX1250: v_cmpx_lt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_u32_e64 v255, v255
+// GFX1250: v_cmpx_lt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_u32_e64 s1, s2
+// GFX1250: v_cmpx_lt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s105, s105
+// GFX1250: v_cmpx_lt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 0.5, m0
+// GFX1250: v_cmpx_lt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_lt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lt_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd9,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_lt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_lt_u64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_u64_e64 -1, -1
+// GFX1250: v_cmpx_lt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_u64_e64 0.5, null
+// GFX1250: v_cmpx_lt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_lt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_lt_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_lt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_i16_e64 v1, v2
+// GFX1250: v_cmpx_ne_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_i16_e64 v255, v255
+// GFX1250: v_cmpx_ne_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_i16_e64 s1, s2
+// GFX1250: v_cmpx_ne_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s105, s105
+// GFX1250: v_cmpx_ne_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16_e64 0.5, m0
+// GFX1250: v_cmpx_ne_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb5,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ne_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 v1, v2
+// GFX1250: v_cmpx_ne_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_i32_e64 v255, v255
+// GFX1250: v_cmpx_ne_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_i32_e64 s1, s2
+// GFX1250: v_cmpx_ne_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s105, s105
+// GFX1250: v_cmpx_ne_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 0.5, m0
+// GFX1250: v_cmpx_ne_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ne_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ne_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ne_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ne_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ne_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ne_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ne_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ne_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ne_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ne_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ne_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd5,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_ne_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ne_i64_e64 null, 0.5
+// GFX1250: v_cmpx_ne_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ne_i64_e64 -1, -1
+// GFX1250: v_cmpx_ne_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ne_i64_e64 0.5, null
+// GFX1250: v_cmpx_ne_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ne_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_ne_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ne_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ne_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_u16_e64 v1, v2
+// GFX1250: v_cmpx_ne_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_u16_e64 v255, v255
+// GFX1250: v_cmpx_ne_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_u16_e64 s1, s2
+// GFX1250: v_cmpx_ne_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s105, s105
+// GFX1250: v_cmpx_ne_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16_e64 0.5, m0
+// GFX1250: v_cmpx_ne_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ne_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 v1, v2
+// GFX1250: v_cmpx_ne_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_u32_e64 v255, v255
+// GFX1250: v_cmpx_ne_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_u32_e64 s1, s2
+// GFX1250: v_cmpx_ne_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s105, s105
+// GFX1250: v_cmpx_ne_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 0.5, m0
+// GFX1250: v_cmpx_ne_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ne_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ne_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ne_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ne_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ne_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ne_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ne_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ne_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ne_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ne_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ne_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdd,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_ne_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ne_u64_e64 null, 0.5
+// GFX1250: v_cmpx_ne_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ne_u64_e64 -1, -1
+// GFX1250: v_cmpx_ne_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ne_u64_e64 0.5, null
+// GFX1250: v_cmpx_ne_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ne_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_ne_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ne_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ne_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_neq_f16_e64 v1, v2
+// GFX1250: v_cmpx_neq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_neq_f16_e64 v255, v255
+// GFX1250: v_cmpx_neq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_neq_f16_e64 s1, s2
+// GFX1250: v_cmpx_neq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s105, s105
+// GFX1250: v_cmpx_neq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_neq_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_neq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_neq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_neq_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_neq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_neq_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_neq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_neq_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_neq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_neq_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_neq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_neq_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_neq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_neq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_neq_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_neq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 v1, v2
+// GFX1250: v_cmpx_neq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_neq_f32_e64 v255, v255
+// GFX1250: v_cmpx_neq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_neq_f32_e64 s1, s2
+// GFX1250: v_cmpx_neq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s105, s105
+// GFX1250: v_cmpx_neq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_neq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_neq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_neq_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_neq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_neq_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_neq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_neq_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_neq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_neq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_neq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_neq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_neq_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_neq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_neq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_neq_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_neq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_neq_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_neq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_neq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_neq_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_neq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_neq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_neq_f64_e64 null, 0.5
+// GFX1250: v_cmpx_neq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_neq_f64_e64 -1, -1
+// GFX1250: v_cmpx_neq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_neq_f64_e64 0.5, null
+// GFX1250: v_cmpx_neq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_neq_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_neq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f16_e64 v1, v2
+// GFX1250: v_cmpx_nge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nge_f16_e64 v255, v255
+// GFX1250: v_cmpx_nge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nge_f16_e64 s1, s2
+// GFX1250: v_cmpx_nge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s105, s105
+// GFX1250: v_cmpx_nge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nge_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nge_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nge_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nge_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nge_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nge_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nge_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 v1, v2
+// GFX1250: v_cmpx_nge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nge_f32_e64 v255, v255
+// GFX1250: v_cmpx_nge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nge_f32_e64 s1, s2
+// GFX1250: v_cmpx_nge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s105, s105
+// GFX1250: v_cmpx_nge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nge_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nge_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nge_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nge_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nge_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nge_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nge_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nge_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nge_f64_e64 -1, -1
+// GFX1250: v_cmpx_nge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nge_f64_e64 0.5, null
+// GFX1250: v_cmpx_nge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nge_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f16_e64 v1, v2
+// GFX1250: v_cmpx_ngt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ngt_f16_e64 v255, v255
+// GFX1250: v_cmpx_ngt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ngt_f16_e64 s1, s2
+// GFX1250: v_cmpx_ngt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s105, s105
+// GFX1250: v_cmpx_ngt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ngt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ngt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_ngt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ngt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ngt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_ngt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ngt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_ngt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 v1, v2
+// GFX1250: v_cmpx_ngt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ngt_f32_e64 v255, v255
+// GFX1250: v_cmpx_ngt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ngt_f32_e64 s1, s2
+// GFX1250: v_cmpx_ngt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s105, s105
+// GFX1250: v_cmpx_ngt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ngt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ngt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_ngt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ngt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ngt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_ngt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ngt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_ngt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ngt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ngt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ngt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ngt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ngt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ngt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ngt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_ngt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_ngt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_ngt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 -1, -1
+// GFX1250: v_cmpx_ngt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 0.5, null
+// GFX1250: v_cmpx_ngt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_ngt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f16_e64 v1, v2
+// GFX1250: v_cmpx_nle_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nle_f16_e64 v255, v255
+// GFX1250: v_cmpx_nle_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nle_f16_e64 s1, s2
+// GFX1250: v_cmpx_nle_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s105, s105
+// GFX1250: v_cmpx_nle_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nle_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nle_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nle_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nle_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nle_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nle_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nle_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nle_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nle_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nle_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nle_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nle_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nle_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nle_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nle_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nle_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 v1, v2
+// GFX1250: v_cmpx_nle_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nle_f32_e64 v255, v255
+// GFX1250: v_cmpx_nle_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nle_f32_e64 s1, s2
+// GFX1250: v_cmpx_nle_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s105, s105
+// GFX1250: v_cmpx_nle_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nle_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nle_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nle_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nle_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nle_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nle_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nle_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nle_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nle_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nle_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nle_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nle_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nle_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nle_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nle_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nle_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nle_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nle_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nle_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nle_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nle_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nle_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nle_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nle_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nle_f64_e64 -1, -1
+// GFX1250: v_cmpx_nle_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nle_f64_e64 0.5, null
+// GFX1250: v_cmpx_nle_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nle_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nle_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f16_e64 v1, v2
+// GFX1250: v_cmpx_nlg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlg_f16_e64 v255, v255
+// GFX1250: v_cmpx_nlg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlg_f16_e64 s1, s2
+// GFX1250: v_cmpx_nlg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s105, s105
+// GFX1250: v_cmpx_nlg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nlg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nlg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nlg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 v1, v2
+// GFX1250: v_cmpx_nlg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlg_f32_e64 v255, v255
+// GFX1250: v_cmpx_nlg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlg_f32_e64 s1, s2
+// GFX1250: v_cmpx_nlg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s105, s105
+// GFX1250: v_cmpx_nlg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nlg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nlg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nlg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nlg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nlg_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nlg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nlg_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nlg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nlg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nlg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nlg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nlg_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nlg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 -1, -1
+// GFX1250: v_cmpx_nlg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 0.5, null
+// GFX1250: v_cmpx_nlg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nlg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f16_e64 v1, v2
+// GFX1250: v_cmpx_nlt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlt_f16_e64 v255, v255
+// GFX1250: v_cmpx_nlt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlt_f16_e64 s1, s2
+// GFX1250: v_cmpx_nlt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s105, s105
+// GFX1250: v_cmpx_nlt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nlt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nlt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nlt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 v1, v2
+// GFX1250: v_cmpx_nlt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlt_f32_e64 v255, v255
+// GFX1250: v_cmpx_nlt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlt_f32_e64 s1, s2
+// GFX1250: v_cmpx_nlt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s105, s105
+// GFX1250: v_cmpx_nlt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nlt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nlt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nlt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nlt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nlt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nlt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nlt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nlt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nlt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nlt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nlt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nlt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nlt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 -1, -1
+// GFX1250: v_cmpx_nlt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 0.5, null
+// GFX1250: v_cmpx_nlt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nlt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f16_e64 v1, v2
+// GFX1250: v_cmpx_o_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_o_f16_e64 v255, v255
+// GFX1250: v_cmpx_o_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_o_f16_e64 s1, s2
+// GFX1250: v_cmpx_o_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s105, s105
+// GFX1250: v_cmpx_o_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_o_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_o_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_o_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_o_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_o_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_o_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_o_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_o_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_o_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_o_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_o_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_o_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_o_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_o_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_o_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_o_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_o_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_o_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_o_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_o_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 v1, v2
+// GFX1250: v_cmpx_o_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_o_f32_e64 v255, v255
+// GFX1250: v_cmpx_o_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_o_f32_e64 s1, s2
+// GFX1250: v_cmpx_o_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s105, s105
+// GFX1250: v_cmpx_o_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_o_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_o_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_o_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_o_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_o_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_o_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_o_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_o_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_o_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_o_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_o_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_o_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_o_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_o_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_o_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_o_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_o_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_o_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_o_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_o_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_o_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_o_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_o_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_o_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_o_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_o_f64_e64 null, 0.5
+// GFX1250: v_cmpx_o_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_o_f64_e64 -1, -1
+// GFX1250: v_cmpx_o_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_o_f64_e64 0.5, null
+// GFX1250: v_cmpx_o_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_o_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_o_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f16_e64 v1, v2
+// GFX1250: v_cmpx_u_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_u_f16_e64 v255, v255
+// GFX1250: v_cmpx_u_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_u_f16_e64 s1, s2
+// GFX1250: v_cmpx_u_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s105, s105
+// GFX1250: v_cmpx_u_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_u_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_u_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_u_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_u_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_u_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_u_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_u_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_u_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_u_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_u_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_u_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_u_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_u_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_u_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_u_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_u_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_u_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_u_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_u_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_u_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 v1, v2
+// GFX1250: v_cmpx_u_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_u_f32_e64 v255, v255
+// GFX1250: v_cmpx_u_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_u_f32_e64 s1, s2
+// GFX1250: v_cmpx_u_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s105, s105
+// GFX1250: v_cmpx_u_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_u_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_u_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_u_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_u_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_u_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_u_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_u_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_u_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_u_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_u_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_u_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_u_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_u_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_u_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_u_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_u_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_u_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_u_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_u_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_u_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_u_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_u_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_u_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_u_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_u_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_u_f64_e64 null, 0.5
+// GFX1250: v_cmpx_u_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_u_f64_e64 -1, -1
+// GFX1250: v_cmpx_u_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_u_f64_e64 0.5, null
+// GFX1250: v_cmpx_u_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_u_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_u_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s
new file mode 100644
index 0000000..2875d3e
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 op_sel:[0,0,0] row_ror:7 bank_mask:0x1 bound_ctrl:0
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s
new file mode 100644
index 0000000..13b8e21
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, abs(v1), -v2, abs(v3) dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, abs(v1), -v2, abs(v3) dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s
new file mode 100644
index 0000000..50766f13
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s
@@ -0,0 +1,175 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s
+
+image_sample v64, v32, s[4:11], s[100:103] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_l v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_lz v64, v32, s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_l v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_lz v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_o v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_l_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_lz_o v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_l_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_lz_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4 v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_l v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_b v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_lz v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_lz v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_o v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_lz_o v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_lz_o v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_get_lod v64, v32, s[4:11], s[100:103] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_g16 v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_o_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_o_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_cl v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_cl v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_cl v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_cl v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_cl_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_cl_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_cl_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_o v64, [v32, v33, v34, v[35:37]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_cl_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_o_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_o_g16 v64, [v32, v33, v34, v[35:37]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_cl v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_b_cl v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_cl v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_l v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_b v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_b_cl v[64:67], [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4h v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_msaa_load v[1:4], [v5, v6, v7], s[8:15] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32.s b/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32.s
index 93e65d3..8185b77 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32.s
@@ -1737,3 +1737,173 @@ v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_lo:[0,0,1] neg_hi:
// GFX1250: v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x04,0x44,0x88,0xcc,0x00,0x05,0x12,0x9c]
// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x08,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s1, s2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s1, s2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x35,0xcc,0x01,0x04,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW0
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x08,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_reuse
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_reuse ; encoding: [0x00,0x20,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse ; encoding: [0x00,0x28,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW0
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_reuse
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_reuse ; encoding: [0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse ; encoding: [0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E8
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x01,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x02,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x08,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s[2:3], s[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s[2:3], s[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x3a,0xcc,0x02,0x08,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1]
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW0
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x08,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_reuse
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_reuse ; encoding: [0x00,0x20,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse ; encoding: [0x00,0x28,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW0
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_reuse
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_reuse ; encoding: [0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse ; encoding: [0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E8
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x01,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x02,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E8 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1]
+// GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+// WAVESIZE-ERR: :[[@LINE-2]]:1: error: instruction requires wavesize=32
+// GFX12-ERR: :[[@LINE-3]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32_err.s
index 1eae8f6..41cac9d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_wmma_w32_err.s
@@ -449,6 +449,16 @@ v_wmma_f32_16x16x128_f8f6f4 v[0:7], v[0:15], v[20:35], v[40:47] matrix_b_fmt:MAT
// GFX1250-ERR-NEXT: {{^}}v_wmma_f32_16x16x128_f8f6f4 v[0:7], v[0:15], v[20:35], v[40:47] matrix_b_fmt:MATRIX_FMT_FP4
// GFX1250-ERR-NEXT: {{^}} ^
+v_wmma_scale_f32_16x16x128_f8f6f4 v[0:7], v[8:23], v[24:31], v[40:47], v1, v2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: wrong register tuple size for MATRIX_FMT_FP8
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale_f32_16x16x128_f8f6f4 v[0:7], v[8:23], v[24:31], v[40:47], v1, v2
+// GFX1250-ERR-NEXT: {{^}} ^
+
+v_wmma_scale16_f32_16x16x128_f8f6f4 v[0:7], v[0:7], v[0:15], v[0:7], s[0:1], s[0:1] matrix_a_fmt:MATRIX_FMT_FP6
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: wrong register tuple size for MATRIX_FMT_FP6
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale16_f32_16x16x128_f8f6f4 v[0:7], v[0:7], v[0:15], v[0:7], s[0:1], s[0:1] matrix_a_fmt:MATRIX_FMT_FP6
+// GFX1250-ERR-NEXT: {{^}} ^
+
v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_lo:[1,0,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid neg_lo operand
// GFX1250-ERR-NEXT: {{^}}v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_lo:[1,0,0]
@@ -468,3 +478,23 @@ v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_hi:[0,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid neg_hi operand
// GFX1250-ERR-NEXT: {{^}}v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_hi:[0,1,0]
// GFX1250-ERR-NEXT: {{^}} ^
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 neg_lo:[1,0,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid neg_lo operand
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 neg_lo:[1,0,0]
+// GFX1250-ERR-NEXT: {{^}} ^
+
+v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_fmt:0
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_fmt:0
+// GFX1250-ERR-NEXT: {{^}} ^
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[0:1], v[2:3] neg_lo:[1,0,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid neg_lo operand
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[0:1], v[2:3] neg_lo:[1,0,0]
+// GFX1250-ERR-NEXT: {{^}} ^
+
+v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[0:1], v[2:3] matrix_a_fmt:0
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT: {{^}}v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[0:1], v[2:3] matrix_a_fmt:0
+// GFX1250-ERR-NEXT: {{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_err.s b/llvm/test/MC/AMDGPU/gfx1250_err.s
index e4598fe..676eb48 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_err.s
@@ -1,5 +1,30 @@
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefixes=GFX1250-ERR --implicit-check-not=error: -strict-whitespace %s
+s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT th:TH_LOAD_NT
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT th:TH_LOAD_NT
+// GFX1250-ERR: ^
+
+s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE scope:SCOPE_SE
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE scope:SCOPE_SE
+// GFX1250-ERR: ^
+
+s_load_b32 s4, s[2:3], 10 nv nv
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 nv nv
+// GFX1250-ERR: ^
+
+v_mov_b64 v[4:5], v[2:3] quad_perm:[1,1,1,1]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_mov_b64 v[4:5], v[2:3] quad_perm:[1,1,1,1]
+// GFX1250-ERR: ^
+
+v_mov_b64 v[4:5], v[2:3] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_mov_b64 v[4:5], v[2:3] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250-ERR: ^
+
// For v_dual_cndmask_b32 use of the explicit src2 forces VOPD3 form even if it is vcc_lo.
// If src2 is omitted then it forces VOPD form. As a result a proper form of the instruction
// has to be used if the other component of the dual instruction cannot be used if that
@@ -137,6 +162,11 @@ v_fmaak_f64 v[4:5], 0x7e8, v[8:9], lit64(0x7e8)
// GFX1250-ERR: v_fmaak_f64 v[4:5], 0x7e8, v[8:9], lit64(0x7e8)
// GFX1250-ERR: ^

+v_pk_add_min_i16 v10, |v1|, v2, v3
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_pk_add_min_i16 v10, |v1|, v2, v3
+// GFX1250-ERR: ^
+
v_pk_add_min_i16 v10, -v1, v2, v3
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1250-ERR: v_pk_add_min_i16 v10, -v1, v2, v3
diff --git a/llvm/test/MC/Disassembler/AArch64/armv9.6a-lsui.txt b/llvm/test/MC/Disassembler/AArch64/armv9.6a-lsui.txt
index 4cde11f..dc53a0b 100644
--- a/llvm/test/MC/Disassembler/AArch64/armv9.6a-lsui.txt
+++ b/llvm/test/MC/Disassembler/AArch64/armv9.6a-lsui.txt
@@ -249,75 +249,75 @@
# CHECK-NEXT: casplt x0, x1, x2, x3, [sp]
# CHECK-NEXT: caspalt x0, x1, x2, x3, [x4]
# CHECK-NEXT: caspalt x0, x1, x2, x3, [sp]
-# CHECK-NEXT: ldtadd w7, wzr, [x5]
-# CHECK-NEXT: ldtadd x9, xzr, [sp]
+# CHECK-NEXT: sttadd w7, [x5]
+# CHECK-NEXT: sttadd x9, [sp]
# CHECK-NEXT: ldtadda w7, wzr, [x5]
# CHECK-NEXT: ldtadda x9, xzr, [sp]
-# CHECK-NEXT: ldtaddl w7, wzr, [x5]
-# CHECK-NEXT: ldtaddl x9, xzr, [sp]
+# CHECK-NEXT: sttaddl w7, [x5]
+# CHECK-NEXT: sttaddl x9, [sp]
# CHECK-NEXT: ldtaddal w7, wzr, [x5]
# CHECK-NEXT: ldtaddal x9, xzr, [sp]
-# CHECK-NEXT: ldtclr w7, wzr, [x5]
-# CHECK-NEXT: ldtclr x9, xzr, [sp]
-# CHECK-NEXT: ldtclrl w7, wzr, [x5]
-# CHECK-NEXT: ldtclrl x9, xzr, [sp]
+# CHECK-NEXT: sttclr w7, [x5]
+# CHECK-NEXT: sttclr x9, [sp]
+# CHECK-NEXT: sttclrl w7, [x5]
+# CHECK-NEXT: sttclrl x9, [sp]
# CHECK-NEXT: ldtclra w7, wzr, [x5]
# CHECK-NEXT: ldtclra x9, xzr, [sp]
# CHECK-NEXT: ldtclral w7, wzr, [x5]
# CHECK-NEXT: ldtclral x9, xzr, [sp]
-# CHECK-NEXT: ldtset w7, wzr, [x5]
-# CHECK-NEXT: ldtset x9, xzr, [sp]
-# CHECK-NEXT: ldtsetl w7, wzr, [x5]
-# CHECK-NEXT: ldtsetl x9, xzr, [sp]
+# CHECK-NEXT: sttset w7, [x5]
+# CHECK-NEXT: sttset x9, [sp]
+# CHECK-NEXT: sttsetl w7, [x5]
+# CHECK-NEXT: sttsetl x9, [sp]
# CHECK-NEXT: ldtseta w7, wzr, [x5]
# CHECK-NEXT: ldtseta x9, xzr, [sp]
# CHECK-NEXT: ldtsetal w7, wzr, [x5]
# CHECK-NEXT: ldtsetal x9, xzr, [sp]
-# CHECK-NEXT: ldtadd w0, wzr, [x2]
-# CHECK-NEXT: ldtadd w2, wzr, [sp]
-# CHECK-NEXT: ldtadd x0, xzr, [x2]
-# CHECK-NEXT: ldtadd x2, xzr, [sp]
-# CHECK-NEXT: ldtadd w0, wzr, [x2]
-# CHECK-NEXT: ldtadd w2, wzr, [sp]
-# CHECK-NEXT: ldtadd x0, xzr, [x2]
-# CHECK-NEXT: ldtadd x2, xzr, [sp]
-# CHECK-NEXT: ldtadd w0, wzr, [x2]
-# CHECK-NEXT: ldtadd w2, wzr, [sp]
-# CHECK-NEXT: ldtadd x0, xzr, [x2]
-# CHECK-NEXT: ldtadd x2, xzr, [sp]
-# CHECK-NEXT: ldtadd w0, wzr, [x2]
-# CHECK-NEXT: ldtadd w2, wzr, [sp]
-# CHECK-NEXT: ldtadd x0, xzr, [x2]
-# CHECK-NEXT: ldtadd x2, xzr, [sp]
-# CHECK-NEXT: ldtclr w0, wzr, [x2]
-# CHECK-NEXT: ldtclr w2, wzr, [sp]
-# CHECK-NEXT: ldtclr x0, xzr, [x2]
-# CHECK-NEXT: ldtclr x2, xzr, [sp]
-# CHECK-NEXT: ldtclr w0, wzr, [x2]
-# CHECK-NEXT: ldtclr w2, wzr, [sp]
-# CHECK-NEXT: ldtclr x0, xzr, [x2]
-# CHECK-NEXT: ldtclr x2, xzr, [sp]
-# CHECK-NEXT: ldtclr w0, wzr, [x2]
-# CHECK-NEXT: ldtclr w2, wzr, [sp]
-# CHECK-NEXT: ldtclr x0, xzr, [x2]
-# CHECK-NEXT: ldtclr x2, xzr, [sp]
-# CHECK-NEXT: ldtclr w0, wzr, [x2]
-# CHECK-NEXT: ldtclr x2, xzr, [sp]
-# CHECK-NEXT: ldtclr x0, xzr, [x2]
-# CHECK-NEXT: ldtclr x2, xzr, [sp]
-# CHECK-NEXT: ldtset w0, wzr, [x2]
-# CHECK-NEXT: ldtset w2, wzr, [sp]
-# CHECK-NEXT: ldtset x0, xzr, [x2]
-# CHECK-NEXT: ldtset x2, xzr, [sp]
-# CHECK-NEXT: ldtset w0, wzr, [x2]
-# CHECK-NEXT: ldtset w2, wzr, [sp]
-# CHECK-NEXT: ldtset x0, xzr, [x2]
-# CHECK-NEXT: ldtset x2, xzr, [sp]
-# CHECK-NEXT: ldtset w0, wzr, [x2]
-# CHECK-NEXT: ldtset w2, wzr, [sp]
-# CHECK-NEXT: ldtset x0, xzr, [x2]
-# CHECK-NEXT: ldtset x2, xzr, [sp]
-# CHECK-NEXT: ldtset w0, wzr, [x2]
-# CHECK-NEXT: ldtset x2, xzr, [sp]
-# CHECK-NEXT: ldtset x0, xzr, [x2]
-# CHECK-NEXT: ldtset x2, xzr, [sp]
+# CHECK-NEXT: sttadd w0, [x2]
+# CHECK-NEXT: sttadd w2, [sp]
+# CHECK-NEXT: sttadd x0, [x2]
+# CHECK-NEXT: sttadd x2, [sp]
+# CHECK-NEXT: sttadd w0, [x2]
+# CHECK-NEXT: sttadd w2, [sp]
+# CHECK-NEXT: sttadd x0, [x2]
+# CHECK-NEXT: sttadd x2, [sp]
+# CHECK-NEXT: sttadd w0, [x2]
+# CHECK-NEXT: sttadd w2, [sp]
+# CHECK-NEXT: sttadd x0, [x2]
+# CHECK-NEXT: sttadd x2, [sp]
+# CHECK-NEXT: sttadd w0, [x2]
+# CHECK-NEXT: sttadd w2, [sp]
+# CHECK-NEXT: sttadd x0, [x2]
+# CHECK-NEXT: sttadd x2, [sp]
+# CHECK-NEXT: sttclr w0, [x2]
+# CHECK-NEXT: sttclr w2, [sp]
+# CHECK-NEXT: sttclr x0, [x2]
+# CHECK-NEXT: sttclr x2, [sp]
+# CHECK-NEXT: sttclr w0, [x2]
+# CHECK-NEXT: sttclr w2, [sp]
+# CHECK-NEXT: sttclr x0, [x2]
+# CHECK-NEXT: sttclr x2, [sp]
+# CHECK-NEXT: sttclr w0, [x2]
+# CHECK-NEXT: sttclr w2, [sp]
+# CHECK-NEXT: sttclr x0, [x2]
+# CHECK-NEXT: sttclr x2, [sp]
+# CHECK-NEXT: sttclr w0, [x2]
+# CHECK-NEXT: sttclr x2, [sp]
+# CHECK-NEXT: sttclr x0, [x2]
+# CHECK-NEXT: sttclr x2, [sp]
+# CHECK-NEXT: sttset w0, [x2]
+# CHECK-NEXT: sttset w2, [sp]
+# CHECK-NEXT: sttset x0, [x2]
+# CHECK-NEXT: sttset x2, [sp]
+# CHECK-NEXT: sttset w0, [x2]
+# CHECK-NEXT: sttset w2, [sp]
+# CHECK-NEXT: sttset x0, [x2]
+# CHECK-NEXT: sttset x2, [sp]
+# CHECK-NEXT: sttset w0, [x2]
+# CHECK-NEXT: sttset w2, [sp]
+# CHECK-NEXT: sttset x0, [x2]
+# CHECK-NEXT: sttset x2, [sp]
+# CHECK-NEXT: sttset w0, [x2]
+# CHECK-NEXT: sttset x2, [sp]
+# CHECK-NEXT: sttset x0, [x2]
+# CHECK-NEXT: sttset x2, [sp]
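
The rewritten CHECK lines above all follow one aliasing rule: an LSUI read-modify-write whose loaded result is discarded (Rt is wzr/xzr) now disassembles to its stt* store alias with the destination register dropped, while the acquire variants (ldt*a, ldt*al) keep the load spelling, presumably because the acquire ordering is tied to the loaded value. A minimal Python sketch of that selection rule, covering only the mnemonics exercised in this file; the helper name and structure are illustrative, not LLVM's actual InstPrinter logic:

# Sketch: pick the printed mnemonic for an LSUI atomic, based solely on
# the behaviour visible in the CHECK lines above. Illustrative only.
BASES = ("ldtadd", "ldtclr", "ldtset")

def printed_mnemonic(mnemonic: str, rt_is_zr: bool) -> str:
    for base in BASES:
        if mnemonic.startswith(base):
            suffix = mnemonic[len(base):]   # "", "a", "l", or "al"
            # Store alias only when the result is discarded and there is
            # no acquire semantic ("" = plain, "l" = release-only).
            if rt_is_zr and suffix in ("", "l"):
                return "stt" + base[3:] + suffix   # Rt operand is dropped
            break
    return mnemonic

assert printed_mnemonic("ldtadd", True) == "sttadd"
assert printed_mnemonic("ldtaddl", True) == "sttaddl"
assert printed_mnemonic("ldtadda", True) == "ldtadda"    # acquire keeps load form
assert printed_mnemonic("ldtadd", False) == "ldtadd"     # result used: no alias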
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
index 0870aa7..13440a0 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
@@ -1,5 +1,1109 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=GFX1250 %s
+# GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_add_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff]
+0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05]
+0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_and_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_and_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_append v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff]
+0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_append v5 offset:65535 ; encoding: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05]
+0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_bpermute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_bpermute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_cmpstore_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00]
+0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_cmpstore_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00]
+0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00]
+0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00]
+0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff]
+0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff
+
+# GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05]
+0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe]
+0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe
+
+# GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06]
+0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_cond_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_consume v255 offset:4 ; encoding: [0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff]
+0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_consume v5 offset:65535 ; encoding: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05]
+0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_inc_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_inc_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_max_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_mskor_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00]
+0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_mskor_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00]
+0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00]
+0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_mskor_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00]
+0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff]
+0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff
+
+# GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05]
+0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe]
+0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe
+
+# GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06]
+0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_nop ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_or_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_or_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_permute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_permute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_pk_add_bf16 v0, v0 ; encoding: [0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v255, v255 ; encoding: [0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00]
+0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00]
+0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v0, v0 ; encoding: [0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00]
+0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00]
+0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00]
+0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00]
+0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v0, v200 ; encoding: [0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff]
+0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 ; encoding: [0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff]
+0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535 ; encoding: [0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff]
+0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 ; encoding: [0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03]
+0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03]
+0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 ; encoding: [0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff]
+0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 offset:65535 ; encoding: [0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff]
+0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v255, v255 ; encoding: [0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff]
+0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 ; encoding: [0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03]
+0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03]
+0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_rsub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_rsub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00
+
+# GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00
+
+# GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00
+
+# GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00
+
+# GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_clamp_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_clamp_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_sub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(FFT,31) ; encoding: [0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM,"01pip") ; encoding: [0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1) ; encoding: [0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7) ; encoding: [0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM,0,1,2,3) ; encoding: [0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8) ; encoding: [0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16) ; encoding: [0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_xor_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_xor_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06
+
# GFX1250: ds_atomic_async_barrier_arrive_b64 v1 offset:65407 ; encoding: [0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00]
0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt
new file mode 100644
index 0000000..d72009b
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt
@@ -0,0 +1,34 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=GFX1250 %s
+
+# GFX1250: s_mov_b32 s0, src_flat_scratch_base_lo ; encoding: [0xe6,0x00,0x80,0xbe]
+0xe6,0x00,0x80,0xbe
+
+# GFX1250: s_mov_b32 s0, src_flat_scratch_base_hi ; encoding: [0xe7,0x00,0x80,0xbe]
+0xe7,0x00,0x80,0xbe
+
+# GFX1250: s_mov_b64 s[0:1], src_flat_scratch_base_lo ; encoding: [0xe6,0x01,0x80,0xbe]
+0xe6,0x01,0x80,0xbe
+
+# GFX1250: s_mov_b64 s[0:1], src_shared_base ; encoding: [0xeb,0x01,0x80,0xbe]
+0xeb,0x01,0x80,0xbe
+
+# GFX1250: s_mov_b64 s[0:1], src_shared_base ; encoding: [0xeb,0x01,0x80,0xbe]
+0xeb,0x01,0x80,0xbe
+
+# GFX1250: s_mov_b64 s[0:1], src_shared_limit ; encoding: [0xec,0x01,0x80,0xbe]
+0xec,0x01,0x80,0xbe
+
+# GFX1250: s_mov_b64 s[0:1], src_shared_limit ; encoding: [0xec,0x01,0x80,0xbe]
+0xec,0x01,0x80,0xbe
+
+# GFX1250: s_getreg_b32 s1, hwreg(HW_REG_XNACK_STATE_PRIV) ; encoding: [0x21,0xf8,0x81,0xb8]
+0x21,0xf8,0x81,0xb8
+
+# GFX1250: s_getreg_b32 s1, hwreg(HW_REG_XNACK_MASK) ; encoding: [0x22,0xf8,0x81,0xb8]
+0x22,0xf8,0x81,0xb8
+
+# GFX1250: s_setreg_b32 hwreg(HW_REG_XNACK_STATE_PRIV), s1 ; encoding: [0x21,0xf8,0x01,0xb9]
+0x21,0xf8,0x01,0xb9
+
+# GFX1250: s_setreg_b32 hwreg(HW_REG_XNACK_MASK), s1 ; encoding: [0x22,0xf8,0x01,0xb9]
+0x22,0xf8,0x01,0xb9
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
index 83fa647..07aca1e 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
@@ -12,6 +12,9 @@
# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe]
0x02,0x4b,0x80,0xbe

+# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe]
+0x00,0x06,0x82,0xbe
+
# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe]
0xc3,0x4e,0x80,0xbe

diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
index 2499225..ddf779a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
@@ -1,5 +1,2138 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s

+# GFX1250: buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
# GFX1250: buffer_atomic_and_b32 v5, v1, s[8:11], s3 offen offset:4095 nv ; encoding: [0x83,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00]
0x83,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt
new file mode 100644
index 0000000..e419e4583
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt
@@ -0,0 +1,3419 @@
+# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
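+#
+# How to read these autogenerated cases (a brief orientation): each raw byte
+# line below is input to the llvm-mc disassembler, and the "# GFX1250:" line
+# paired with it is the FileCheck pattern that the printed instruction and its
+# re-encoding must match. Multi-byte literal operands are little-endian, e.g.
+# the trailing bytes 0x56,0x34,0x12,0xaf encode the literal 0xaf123456.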
+
+0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi ; encoding: [0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00
+# GFX1250: v_cmpx_class_f16_e64 m0, src_scc ; encoding: [0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 s1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00
+# GFX1250: v_cmpx_class_f16_e64 s105, v255 ; encoding: [0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 ttmp15, ttmp15 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 v255, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 vcc_hi, s105 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 vcc_lo, s2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi ; encoding: [0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_class_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20
+# GFX1250: v_cmpx_class_f64_e64 -|src_scc|, src_scc ; encoding: [0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20]
+
+0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00
+# GFX1250: v_cmpx_class_f64_e64 0.5, 0.5 ; encoding: [0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00]
+
+0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f64_e64 0xaf123456, 0xaf123456 ; encoding: [0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 exec, exec_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 null, null ; encoding: [0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 s[104:105], vcc_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 s[2:3], vcc_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 ttmp[14:15], exec_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[254:255], ttmp15 ; encoding: [0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], s105 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], s2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], v2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], v255 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00]
+
+0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 vcc, m0 ; encoding: [0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_eq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_eq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_eq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_eq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_eq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_eq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd2,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xba,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xda,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_ge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_ge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd6,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xde,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_gt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_gt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_gt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_gt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_gt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_gt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd4,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdc,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_le_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_le_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_le_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_le_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_le_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_le_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00]
+
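+# Note on the lit64() cases below: the input bytes encode the constant as a
+# 32-bit literal (src operand 0xff), while the re-assembled encoding in the
+# check line uses the 64-bit literal operand (src operand 0xfe, printed as
+# lit64), so the two byte sequences intentionally differ.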
+0x7e,0x00,0xd3,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd3,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdb,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_lg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_lg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_lt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_lt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd1,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd9,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd5,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ne_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdd,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ne_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_neq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_neq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_neq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_neq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_neq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_neq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_neq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_neq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_neq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ngt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ngt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_ngt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_ngt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nle_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nle_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nle_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nle_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nle_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nle_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nle_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nle_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nle_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nlg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nlg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nlt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nlt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_o_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_o_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_o_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_o_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_o_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_o_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_o_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_o_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_o_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_u_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_u_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_u_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_u_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_u_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_u_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_u_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_u_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_u_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt
new file mode 100644
index 0000000..73e9d73
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
+0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt
new file mode 100644
index 0000000..27e7025
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_wmma_w32.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_wmma_w32.txt
index 2216348..a409dac 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_wmma_w32.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_wmma_w32.txt
@@ -999,3 +999,93 @@
0x04,0x44,0x88,0xcc,0x00,0x05,0x12,0x9c
# GFX1250: v_wmma_f32_32x16x128_f4 v[4:19], v[0:15], v[2:9], v[4:19] neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x04,0x44,0x88,0xcc,0x00,0x05,0x12,0x9c]
+
+0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x20,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_reuse ; encoding: [0x00,0x20,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x08,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x08,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x28,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse ; encoding: [0x00,0x28,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_reuse ; encoding: [0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x00,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s0, s0 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse ; encoding: [0x00,0x40,0x35,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x68,0x35,0xcc,0x01,0x04,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s1, s2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x35,0xcc,0x01,0x04,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x68,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x08,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x08,0x35,0xcc,0x01,0x05,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x00,0x35,0xcc,0x01,0x05,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x02,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x02,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x01,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v1, v2 matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x01,0x35,0xcc,0x01,0x05,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x20,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_reuse ; encoding: [0x00,0x20,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x08,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x08,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x28,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_a_reuse ; encoding: [0x00,0x28,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_reuse ; encoding: [0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x00,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1 ; encoding: [0x00,0x00,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[0:7], v[0:15], s[0:1], s[0:1] matrix_b_scale:MATRIX_SCALE_ROW1 matrix_b_reuse ; encoding: [0x00,0x40,0x3a,0xcc,0x00,0x00,0x00,0x08,0x00,0x40,0x88,0xcc,0x08,0x01,0x02,0x1c]
+
+0x00,0x68,0x3a,0xcc,0x02,0x08,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], s[2:3], s[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x3a,0xcc,0x02,0x08,0x00,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x68,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 matrix_a_reuse matrix_b_reuse neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x68,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x08,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale:MATRIX_SCALE_ROW1 matrix_b_scale:MATRIX_SCALE_ROW1 neg_lo:[0,0,1] neg_hi:[0,0,1] ; encoding: [0x00,0x08,0x3a,0xcc,0x02,0x09,0x02,0x08,0x00,0x44,0x88,0xcc,0x08,0x31,0xa2,0x9c]
+
+0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x40,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_a_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x00,0x3a,0xcc,0x02,0x09,0x02,0x20,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x02,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E4M3 ; encoding: [0x00,0x02,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
+
+0x00,0x01,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c
+# GFX1250: v_wmma_scale16_f32_32x16x128_f4 v[0:15], v[8:23], v[24:31], v[40:55], v[2:3], v[4:5] matrix_b_scale_fmt:MATRIX_SCALE_FMT_E5M3 ; encoding: [0x00,0x01,0x3a,0xcc,0x02,0x09,0x02,0x00,0x00,0x40,0x88,0xcc,0x08,0x31,0xa2,0x1c]
diff --git a/llvm/test/TableGen/intrinsic-attrs.td b/llvm/test/TableGen/intrinsic-attrs.td
index 18309d7..92a90dc 100644
--- a/llvm/test/TableGen/intrinsic-attrs.td
+++ b/llvm/test/TableGen/intrinsic-attrs.td
@@ -30,12 +30,11 @@ def int_deref_ptr_ret : Intrinsic<[llvm_ptr_ty], [], [Dereferenceable<RetIndex,
// CHECK: getAttributes(LLVMContext &C, ID id,
// CHECK-NEXT: FunctionType *FT) {
-// CHECK: case 1:
-// CHECK-NEXT: return AttributeList::get(C, {
-// CHECK-NEXT: {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, FnAttrID)}
-// CHECK-NEXT: });
+// CHECK: case 1:
+// CHECK-NEXT: HasFnAttr = true;
+// CHECK-NEXT: break;
// CHECK-NEXT: case 0:
-// CHECK-NEXT: return AttributeList::get(C, {
-// CHECK-NEXT: {0, getIntrinsicArgAttributeSet(C, 0, FT->getContainedType(0))},
-// CHECK-NEXT: {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, FnAttrID)}
-// CHECK-NEXT: });
+// CHECK-NEXT: AS[0] = {0, getIntrinsicArgAttributeSet(C, 0, FT->getContainedType(0))};
+// CHECK-NEXT: HasFnAttr = true;
+// CHECK-NEXT: NumAttrs = 1;
+// CHECK-NEXT: break;
diff --git a/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
new file mode 100644
index 0000000..59916cc
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/Xtensa/atomicrmw-expand.ll
@@ -0,0 +1,2643 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=xtensa-- -passes=atomic-expand %s | FileCheck %s
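+; With no native atomic read-modify-write support in this (default) Xtensa
+; configuration, atomic-expand lowers each atomicrmw below to a libatomic
+; call. The trailing i32 argument encodes the memory order with the usual
+; C ABI constants, as the checks below show: 0 = monotonic (relaxed),
+; 2 = acquire, 3 = release, 4 = acq_rel, 5 = seq_cst.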
+
+define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xchg_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_exchange_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xchg_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_exchange_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xchg_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_exchange_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xchg_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_exchange_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xchg_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_exchange_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_add_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_add_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_add_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_add_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_add_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_add_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_add_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_add_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_add_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_add_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_sub_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_sub_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_sub_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_sub_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_sub_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_sub_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_sub_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_sub_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_sub_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_sub_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_and_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_and_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_and_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_and_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_and_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_and_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_and_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_and_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_and_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_and_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_nand_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_nand_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_nand_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_nand_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_nand_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_nand_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_nand_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_nand_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_nand_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_nand_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_or_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_or_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_or_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_or_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_or_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_or_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_or_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_or_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_or_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_or_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xor_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_xor_1(ptr [[A]], i8 [[B]], i32 0)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xor_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_xor_1(ptr [[A]], i8 [[B]], i32 2)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xor_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_xor_1(ptr [[A]], i8 [[B]], i32 3)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xor_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_xor_1(ptr [[A]], i8 [[B]], i32 4)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_xor_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @__atomic_fetch_xor_1(ptr [[A]], i8 [[B]], i32 5)
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
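+; The signed/unsigned min/max operations have no single libatomic helper, so
+; they are expanded in place: load the old value, compute the new one with an
+; icmp/select, spill the expected value to a stack slot, and retry
+; __atomic_compare_exchange_1 until it reports success.
+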
+define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_max_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_max_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_max_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_max_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_max_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_min_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_min_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_min_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_min_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_min_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umax_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umax_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umax_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umax_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umax_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umin_i8_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i8 %b monotonic
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umin_i8_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i8 %b acquire
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umin_i8_release(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i8 %b release
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umin_i8_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i8 %b acq_rel
+ ret i8 %res
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; CHECK-LABEL: define i8 @atomicrmw_umin_i8_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i8 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i8 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i8 [[LOADED]], i8 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: store i8 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_1(ptr [[A]], ptr [[TMP1]], i8 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 1, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i8, i1 } poison, i8 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i8, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i8 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i8 %b seq_cst
+ ret i8 %res
+}
+
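+; The i16 cases below follow the same pattern as the i8 ones; only the _N
+; suffix of the libatomic helper changes, selecting the access size in bytes
+; (_1 for i8, _2 for i16).
+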
+define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xchg_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_exchange_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xchg_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_exchange_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xchg_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_exchange_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xchg_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_exchange_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xchg_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_exchange_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_add_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_add_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_add_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_add_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_add_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_add_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_add_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_add_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_add_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_add_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_sub_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_sub_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_sub_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_sub_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_sub_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_sub_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_sub_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_sub_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_sub_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_sub_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_and_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_and_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_and_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_and_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_and_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_and_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_and_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_and_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_and_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_and_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_nand_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_nand_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_nand_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_nand_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_nand_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_nand_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_nand_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_nand_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_nand_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_nand_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw nand ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_or_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_or_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_or_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_or_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_or_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_or_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_or_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_or_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_or_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_or_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xor_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_xor_2(ptr [[A]], i16 [[B]], i32 0)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xor_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_xor_2(ptr [[A]], i16 [[B]], i32 2)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xor_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_xor_2(ptr [[A]], i16 [[B]], i32 3)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xor_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_xor_2(ptr [[A]], i16 [[B]], i32 4)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_xor_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i16 @__atomic_fetch_xor_2(ptr [[A]], i16 [[B]], i32 5)
+; CHECK-NEXT: ret i16 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_max_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_max_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_max_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_max_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_max_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_min_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_min_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_min_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_min_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_min_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umax_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umax_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umax_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umax_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umax_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umin_i16_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i16 %b monotonic
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umin_i16_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i16 %b acquire
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umin_i16_release(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i16 %b release
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umin_i16_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i16 %b acq_rel
+ ret i16 %res
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; CHECK-LABEL: define i16 @atomicrmw_umin_i16_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i16, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i16 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i16 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i16 [[LOADED]], i16 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: store i16 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_2(ptr [[A]], ptr [[TMP1]], i16 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 2, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i16, i1 } poison, i16 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i16, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i16, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i16 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i16 %b seq_cst
+ ret i16 %res
+}
+
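+; The i32 variants mirror the i16 tests above, using the 4-byte sized
+; libcalls (__atomic_exchange_4, __atomic_fetch_*_4,
+; __atomic_compare_exchange_4).
+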
+define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xchg_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_exchange_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xchg_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_exchange_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xchg_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_exchange_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xchg_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_exchange_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xchg_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_exchange_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xchg ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_add_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_add_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_add_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_add_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_add_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_add_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_add_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_add_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_add_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_add_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw add ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_sub_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_sub_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_sub_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_sub_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_sub_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_sub_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_sub_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_sub_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_sub_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_sub_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw sub ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_and_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_and_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_and_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_and_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_and_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_and_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_and_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_and_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_and_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_and_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw and ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
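+; The nand variants below are deliberately left commented out; no CHECK lines
+; are generated for them here.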
+;define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b monotonic
+; ret i32 %res
+;}
+;define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b acquire
+; ret i32 %res
+;}
+;define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b release
+; ret i32 %res
+;}
+;define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b acq_rel
+; ret i32 %res
+;}
+;define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; %res = atomicrmw nand ptr %a, i32 %b seq_cst
+; ret i32 %res
+;}
+
+define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_or_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_or_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_or_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_or_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_or_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_or_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_or_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_or_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_or_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_or_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw or ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xor_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_xor_4(ptr [[A]], i32 [[B]], i32 0)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xor_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_xor_4(ptr [[A]], i32 [[B]], i32 2)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xor_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_xor_4(ptr [[A]], i32 [[B]], i32 3)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xor_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_xor_4(ptr [[A]], i32 [[B]], i32 4)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_xor_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @__atomic_fetch_xor_4(ptr [[A]], i32 [[B]], i32 5)
+; CHECK-NEXT: ret i32 [[TMP1]]
+;
+ %res = atomicrmw xor ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
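+; As with i16, the i32 min/max family expands to a compare-exchange loop, here
+; built around __atomic_compare_exchange_4 with a 4-byte temporary alloca.
+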
+define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_max_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_max_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_max_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_max_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_max_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw max ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_min_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_min_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_min_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_min_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_min_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sle i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw min ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umax_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umax_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umax_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umax_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umax_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umax ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umin_i32_monotonic(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 0, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i32 %b monotonic
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umin_i32_acquire(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 2, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i32 %b acquire
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umin_i32_release(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 3, i32 0)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i32 %b release
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umin_i32_acq_rel(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 4, i32 2)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i32 %b acq_rel
+ ret i32 %res
+}
+
+define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; CHECK-LABEL: define i32 @atomicrmw_umin_i32_seq_cst(
+; CHECK-SAME: ptr [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT: br label %[[ATOMICRMW_START:.*]]
+; CHECK: [[ATOMICRMW_START]]:
+; CHECK-NEXT: [[LOADED:%.*]] = phi i32 [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], %[[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i32 [[LOADED]], [[B]]
+; CHECK-NEXT: [[NEW:%.*]] = select i1 [[TMP3]], i32 [[LOADED]], i32 [[B]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: store i32 [[LOADED]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(ptr [[A]], ptr [[TMP1]], i32 [[NEW]], i32 5, i32 5)
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP1]], align 4
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { i32, i1 } poison, i32 [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { i32, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP7]], 1
+; CHECK-NEXT: [[NEWLOADED]] = extractvalue { i32, i1 } [[TMP7]], 0
+; CHECK-NEXT: br i1 [[SUCCESS]], label %[[ATOMICRMW_END:.*]], label %[[ATOMICRMW_START]]
+; CHECK: [[ATOMICRMW_END]]:
+; CHECK-NEXT: ret i32 [[NEWLOADED]]
+;
+ %res = atomicrmw umin ptr %a, i32 %b seq_cst
+ ret i32 %res
+}
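
For readers tracing the check lines above: on Xtensa, AtomicExpand lowers these atomicrmw min/max/umin/umax operations to a compare-exchange loop through the __atomic_compare_exchange_4 libcall, and the two trailing i32 arguments are the C ABI memory-order encodings (relaxed=0, acquire=2, release=3, acq_rel=4, seq_cst=5). The failure order drops any release component, which is why the tests show release paired with 0 and acq_rel paired with 2. A minimal C sketch of the same loop shape, assuming the five-argument libcall form the tests call (this is an illustration, not the pass's implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed declaration, matching the call shape in the CHECK lines
       above: memory orders are passed as C ABI integers. */
    bool __atomic_compare_exchange_4(uint32_t *ptr, uint32_t *expected,
                                     uint32_t desired,
                                     int success_order, int failure_order);

    /* Mirrors @atomicrmw_umin_i32_acq_rel above. */
    uint32_t atomicrmw_umin_acq_rel(uint32_t *a, uint32_t b) {
      uint32_t loaded = *a;                          /* plain initial load */
      for (;;) {
        uint32_t desired = loaded <= b ? loaded : b; /* icmp ule + select */
        if (__atomic_compare_exchange_4(a, &loaded, desired,
                                        /*acq_rel=*/4, /*acquire=*/2))
          return loaded; /* success: loaded still holds the old value */
        /* failure: the libcall refreshed loaded with the current value,
           so the loop recomputes the min and retries. */
      }
    }
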
diff --git a/llvm/test/Transforms/AtomicExpand/Xtensa/lit.local.cfg b/llvm/test/Transforms/AtomicExpand/Xtensa/lit.local.cfg
new file mode 100644
index 0000000..e81bfa7
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/Xtensa/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "Xtensa" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/InstCombine/load-store-forward.ll b/llvm/test/Transforms/InstCombine/load-store-forward.ll
index 9a5db31..0f03f16 100644
--- a/llvm/test/Transforms/InstCombine/load-store-forward.ll
+++ b/llvm/test/Transforms/InstCombine/load-store-forward.ll
@@ -365,13 +365,10 @@ define i32 @load_after_memset_unknown(ptr %a, i8 %byte) {
ret i32 %v
}
-; TODO: Handle load at offset.
define i32 @load_after_memset_0_offset(ptr %a) {
; CHECK-LABEL: @load_after_memset_0_offset(
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[A:%.*]], i8 0, i64 16, i1 false)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 4
-; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
-; CHECK-NEXT: ret i32 [[V]]
+; CHECK-NEXT: ret i32 0
;
call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 16, i1 false)
%gep = getelementptr i8, ptr %a, i64 4
@@ -379,6 +376,41 @@ define i32 @load_after_memset_0_offset(ptr %a) {
ret i32 %v
}
+define i32 @load_after_memset_1_offset(ptr %a) {
+; CHECK-LABEL: @load_after_memset_1_offset(
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[A:%.*]], i8 1, i64 16, i1 false)
+; CHECK-NEXT: ret i32 16843009
+;
+ call void @llvm.memset.p0.i64(ptr %a, i8 1, i64 16, i1 false)
+ %gep = getelementptr i8, ptr %a, i64 4
+ %v = load i32, ptr %gep
+ ret i32 %v
+}
+
+define i1 @load_after_memset_0_offset_i1(ptr %a) {
+; CHECK-LABEL: @load_after_memset_0_offset_i1(
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[A:%.*]], i8 0, i64 16, i1 false)
+; CHECK-NEXT: ret i1 false
+;
+ call void @llvm.memset.p0.i64(ptr %a, i8 0, i64 16, i1 false)
+ %gep = getelementptr i1, ptr %a, i64 12
+ %v = load i1, ptr %gep
+ ret i1 %v
+}
+
+define i8 @neg_load_after_memset_0_neg_offset(ptr %a) {
+; CHECK-LABEL: @neg_load_after_memset_0_neg_offset(
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 2
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[GEP]], i8 0, i64 16, i1 false)
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[A]], align 1
+; CHECK-NEXT: ret i8 [[V]]
+;
+ %gep = getelementptr i8, ptr %a, i64 2
+ call void @llvm.memset.p0.i64(ptr %gep, i8 0, i64 16, i1 false)
+ %v = load i8, ptr %a
+ ret i8 %v
+}
+
define i32 @load_after_memset_0_offset_too_large(ptr %a) {
; CHECK-LABEL: @load_after_memset_0_offset_too_large(
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr noundef nonnull align 1 dereferenceable(16) [[A:%.*]], i8 0, i64 16, i1 false)
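
A quick arithmetic check of the constants the new tests above fold to, assuming (as the tests do) that each load lies entirely within the memset-filled region: an i32 read from memory filled with byte 0x01 is that byte replicated into all four positions, 0x01010101 = 16843009, independent of endianness since every byte is equal; an i1 read from a zeroed byte is false; and the negative-offset case cannot fold because the loaded byte sits before the region the memset wrote. A standalone C check of the first value:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
      uint8_t buf[16];
      memset(buf, 1, sizeof buf);     /* memset(%a, 1, 16) from the test */
      uint32_t v;
      memcpy(&v, buf + 4, sizeof v);  /* the i32 load at byte offset 4 */
      assert(v == 16843009u);         /* 0x01010101: byte 1 splatted */
      return 0;
    }
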
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index 795de3d..a8d9a0c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -45,8 +45,8 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
@@ -128,8 +128,8 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 0232d88..4b895ae 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -459,7 +459,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -738,8 +738,8 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[L:%.*]] = load i16, ptr [[SRC]], align 2
; PRED-NEXT: [[O:%.*]] = or i16 [[L]], 1
; PRED-NEXT: [[CONV:%.*]] = uitofp i16 [[O]] to double
@@ -865,7 +865,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[LOOP:.*]]
; DEFAULT: [[LOOP]]:
-; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; DEFAULT-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
; DEFAULT-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; DEFAULT-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
@@ -967,7 +967,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
@@ -1554,7 +1554,7 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP_HEADER:.*]]
; PRED: [[LOOP_HEADER]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; PRED-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
; PRED: [[THEN]]:
; PRED-NEXT: br label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
index 19f2a36..efcd810 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -mtriple=arm64-apple-ios %s -S -debug -disable-output 2>&1 | FileCheck --check-prefix=CM %s
@@ -22,23 +23,31 @@
; Check that the extractvalue operands are actually free in vector code.
-; FORCED: [[E1:%.+]] = extractvalue { i64, i64 } %sv, 0
-; FORCED-NEXT: %broadcast.splatinsert = insertelement <2 x i64> poison, i64 [[E1]], i64 0
-; FORCED-NEXT: %broadcast.splat = shufflevector <2 x i64> %broadcast.splatinsert, <2 x i64> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[E2:%.+]] = extractvalue { i64, i64 } %sv, 1
-; FORCED-NEXT: %broadcast.splatinsert1 = insertelement <2 x i64> poison, i64 [[E2]], i64 0
-; FORCED-NEXT: %broadcast.splat2 = shufflevector <2 x i64> %broadcast.splatinsert1, <2 x i64> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[ADD:%.+]] = add <2 x i64> %broadcast.splat, %broadcast.splat2
-
-; FORCED-LABEL: vector.body: ; preds = %vector.body, %vector.ph
-; FORCED-NEXT: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; FORCED-NEXT: [[GEP:%.+]] = getelementptr i64, ptr %dst, i32 %index
-; FORCED-NEXT: store <2 x i64> [[ADD]], ptr [[GEP]], align 4
-; FORCED-NEXT: %index.next = add nuw i32 %index, 2
-; FORCED-NEXT: [[C:%.+]] = icmp eq i32 %index.next, 1000
-; FORCED-NEXT: br i1 [[C]], label %middle.block, label %vector.body
-
define void @test1(ptr %dst, {i64, i64} %sv) {
+; FORCED-LABEL: define void @test1(
+; FORCED-SAME: ptr [[DST:%.*]], { i64, i64 } [[SV:%.*]]) {
+; FORCED-NEXT: [[ENTRY:.*:]]
+; FORCED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; FORCED: [[VECTOR_PH]]:
+; FORCED-NEXT: [[TMP0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
+; FORCED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; FORCED-NEXT: [[TMP4:%.*]] = extractvalue { i64, i64 } [[SV]], 1
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i64 0
+; FORCED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; FORCED-NEXT: [[TMP1:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
+; FORCED-NEXT: br label %[[VECTOR_BODY:.*]]
+; FORCED: [[VECTOR_BODY]]:
+; FORCED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; FORCED-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
+; FORCED-NEXT: store <2 x i64> [[TMP1]], ptr [[TMP2]], align 4
+; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
+; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; FORCED: [[MIDDLE_BLOCK]]:
+; FORCED-NEXT: br [[EXIT:label %.*]]
+; FORCED: [[SCALAR_PH]]:
+;
entry:
br label %loop.body
@@ -70,25 +79,31 @@ declare float @powf(float, float) readnone nounwind
; CM: LV: Scalar loop costs: 14.
-; FORCED-LABEL: define void @test_getVectorCallCost
-
-; FORCED: [[E1:%.+]] = extractvalue { float, float } %sv, 0
-; FORCED-NEXT: %broadcast.splatinsert = insertelement <2 x float> poison, float [[E1]], i64 0
-; FORCED-NEXT: %broadcast.splat = shufflevector <2 x float> %broadcast.splatinsert, <2 x float> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[E2:%.+]] = extractvalue { float, float } %sv, 1
-; FORCED-NEXT: %broadcast.splatinsert1 = insertelement <2 x float> poison, float [[E2]], i64 0
-; FORCED-NEXT: %broadcast.splat2 = shufflevector <2 x float> %broadcast.splatinsert1, <2 x float> poison, <2 x i32> zeroinitializer
-
-; FORCED-LABEL: vector.body: ; preds = %vector.body, %vector.ph
-; FORCED-NEXT: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; FORCED-NEXT: [[GEP1:%.+]] = getelementptr float, ptr %dst, i32 %index
-; FORCED-NEXT: [[POW:%.+]] = call <2 x float> @llvm.pow.v2f32(<2 x float> %broadcast.splat, <2 x float> %broadcast.splat2)
-; FORCED-NEXT: store <2 x float> [[POW]], ptr [[GEP1]], align 4
-; FORCED-NEXT: %index.next = add nuw i32 %index, 2
-; FORCED-NEXT: [[C:%.+]] = icmp eq i32 %index.next, 1000
-; FORCED-NEXT: br i1 [[C]], label %middle.block, label %vector.body
-
define void @test_getVectorCallCost(ptr %dst, {float, float} %sv) {
+; FORCED-LABEL: define void @test_getVectorCallCost(
+; FORCED-SAME: ptr [[DST:%.*]], { float, float } [[SV:%.*]]) {
+; FORCED-NEXT: [[ENTRY:.*:]]
+; FORCED-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; FORCED: [[VECTOR_PH]]:
+; FORCED-NEXT: [[TMP0:%.*]] = extractvalue { float, float } [[SV]], 0
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
+; FORCED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer
+; FORCED-NEXT: [[TMP4:%.*]] = extractvalue { float, float } [[SV]], 1
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x float> poison, float [[TMP4]], i64 0
+; FORCED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT1]], <2 x float> poison, <2 x i32> zeroinitializer
+; FORCED-NEXT: br label %[[VECTOR_BODY:.*]]
+; FORCED: [[VECTOR_BODY]]:
+; FORCED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; FORCED-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[DST]], i32 [[INDEX]]
+; FORCED-NEXT: [[TMP2:%.*]] = call <2 x float> @llvm.pow.v2f32(<2 x float> [[BROADCAST_SPLAT]], <2 x float> [[BROADCAST_SPLAT2]])
+; FORCED-NEXT: store <2 x float> [[TMP2]], ptr [[TMP1]], align 4
+; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
+; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FORCED: [[MIDDLE_BLOCK]]:
+; FORCED-NEXT: br [[EXIT:label %.*]]
+; FORCED: [[SCALAR_PH]]:
+;
entry:
br label %loop.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
index fff99f1..41a624b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
@@ -75,8 +75,8 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) {
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
index 1471896..cc36cdb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
@@ -397,7 +397,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
@@ -546,7 +546,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
@@ -621,7 +621,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
@@ -696,7 +696,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll
index 29795bc..5e99425 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_test1_no_explicit_vect_width.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -mtriple aarch64-gnu-linux < %s | FileCheck %s
; extern int arr[8][8];
@@ -16,36 +17,6 @@
; }
;
-; CHECK-LABEL: @foo_i32(
-; CHECK-LABEL: vector.ph:
-; CHECK: %[[SplatVal:.*]] = insertelement <4 x i32> poison, i32 %n, i64 0
-; CHECK: %[[Splat:.*]] = shufflevector <4 x i32> %[[SplatVal]], <4 x i32> poison, <4 x i32> zeroinitializer
-
-; CHECK-LABEL: vector.body:
-; CHECK: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
-; CHECK: %[[VecInd:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
-; CHECK: %[[AAddr:.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> %[[VecInd]]
-; CHECK: %[[VecIndTr:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[VecIndTr]], <4 x ptr> %[[AAddr]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[VecIndTr2:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: %[[StoreVal:.*]] = add nsw <4 x i32> %[[VecIndTr2]], %[[Splat]]
-; CHECK: br label %[[InnerLoop:.+]]
-
-; CHECK: [[InnerLoop]]:
-; CHECK: %[[InnerPhi:.*]] = phi <4 x i64> [ zeroinitializer, %vector.body ], [ %[[InnerPhiNext:.*]], %[[InnerLoop]] ]
-; CHECK: %[[AAddr2:.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> %[[InnerPhi]], <4 x i64> %[[VecInd]]
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[StoreVal]], <4 x ptr> %[[AAddr2]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[InnerPhiNext]] = add nuw nsw <4 x i64> %[[InnerPhi]], splat (i64 1)
-; CHECK: %[[VecCond:.*]] = icmp eq <4 x i64> %[[InnerPhiNext]], splat (i64 8)
-; CHECK: %[[InnerCond:.*]] = extractelement <4 x i1> %[[VecCond]], i32 0
-; CHECK: br i1 %[[InnerCond]], label %[[ForInc]], label %[[InnerLoop]]
-
-; CHECK: [[ForInc]]:
-; CHECK: %[[IndNext]] = add nuw i64 %[[Ind]], 4
-; CHECK: %[[VecIndNext]] = add <4 x i64> %[[VecInd]], splat (i64 4)
-; CHECK: %[[Cmp:.*]] = icmp eq i64 %[[IndNext]], 8
-; CHECK: br i1 %[[Cmp]], label %middle.block, label %vector.body
-
@arr2 = external global [8 x i32], align 16
@arr = external global [8 x [8 x i32]], align 16
@@ -54,6 +25,40 @@
; Function Attrs: norecurse nounwind uwtable
define void @foo_i32(i32 %n) {
+; CHECK-LABEL: define void @foo_i32(
+; CHECK-SAME: i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP1]], <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP8]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: br label %[[FOR_BODY31:.*]]
+; CHECK: [[FOR_BODY31]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR_BODY31]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> [[VEC_PHI]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP2]], <4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1)
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 8)
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR_BODY31]]
+; CHECK: [[VECTOR_LATCH]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, [[FOR_END10:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
@@ -83,35 +88,39 @@ for.end10: ; preds = %for.inc8
ret void
}
-; CHECK-LABEL: @foo_i64(
-; CHECK-LABEL: vector.ph:
-; CHECK: %[[SplatVal:.*]] = insertelement <2 x i64> poison, i64 %n, i64 0
-; CHECK: %[[Splat:.*]] = shufflevector <2 x i64> %[[SplatVal]], <2 x i64> poison, <2 x i32> zeroinitializer
-
-; CHECK-LABEL: vector.body:
-; CHECK: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
-; CHECK: %[[VecInd:.*]] = phi <2 x i64> [ <i64 0, i64 1>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
-; CHECK: %[[AAddr:.*]] = getelementptr inbounds [8 x i64], ptr @arrX, i64 0, <2 x i64> %[[VecInd]]
-; CHECK: call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %[[VecInd]], <2 x ptr> %[[AAddr]], i32 4, <2 x i1> splat (i1 true))
-; CHECK: %[[StoreVal:.*]] = add nsw <2 x i64> %[[VecInd]], %[[Splat]]
-; CHECK: br label %[[InnerLoop:.+]]
-
-; CHECK: [[InnerLoop]]:
-; CHECK: %[[InnerPhi:.*]] = phi <2 x i64> [ zeroinitializer, %vector.body ], [ %[[InnerPhiNext:.*]], %[[InnerLoop]] ]
-; CHECK: %[[AAddr2:.*]] = getelementptr inbounds [8 x [8 x i64]], ptr @arrY, i64 0, <2 x i64> %[[InnerPhi]], <2 x i64> %[[VecInd]]
-; CHECK: call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> %[[StoreVal]], <2 x ptr> %[[AAddr2]], i32 4, <2 x i1> splat (i1 true))
-; CHECK: %[[InnerPhiNext]] = add nuw nsw <2 x i64> %[[InnerPhi]], splat (i64 1)
-; CHECK: %[[VecCond:.*]] = icmp eq <2 x i64> %[[InnerPhiNext]], splat (i64 8)
-; CHECK: %[[InnerCond:.*]] = extractelement <2 x i1> %[[VecCond]], i32 0
-; CHECK: br i1 %[[InnerCond]], label %[[ForInc]], label %[[InnerLoop]]
-
-; CHECK: [[ForInc]]:
-; CHECK: %[[IndNext]] = add nuw i64 %[[Ind]], 2
-; CHECK: %[[VecIndNext]] = add <2 x i64> %[[VecInd]], splat (i64 2)
-; CHECK: %[[Cmp:.*]] = icmp eq i64 %[[IndNext]], 8
-; CHECK: br i1 %[[Cmp]], label %middle.block, label %vector.body
-; Function Attrs: norecurse nounwind uwtable
define void @foo_i64(i64 %n) {
+; CHECK-LABEL: define void @foo_i64(
+; CHECK-SAME: i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[N]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i64], ptr @arrX, i64 0, <2 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> [[VEC_IND]], <2 x ptr> [[TMP0]], i32 4, <2 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: br label %[[FOR_BODY31:.*]]
+; CHECK: [[FOR_BODY31]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP3:%.*]], %[[FOR_BODY31]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [8 x [8 x i64]], ptr @arrY, i64 0, <2 x i64> [[VEC_PHI]], <2 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v2i64.v2p0(<2 x i64> [[TMP1]], <2 x ptr> [[TMP2]], i32 4, <2 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP3]] = add nuw nsw <2 x i64> [[VEC_PHI]], splat (i64 1)
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <2 x i64> [[TMP3]], splat (i64 8)
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP5]], label %[[VECTOR_LATCH]], label %[[FOR_BODY31]]
+; CHECK: [[VECTOR_LATCH]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, [[FOR_END10:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index d9a3a71..830e7da 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -59,8 +59,8 @@ define i32 @pr70988(ptr %src, i32 %n) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]]
; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
index 08d35f7..381d2e1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
@@ -256,10 +256,10 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT8]], %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ]
-; PRED-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ]
+; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1
; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]]
; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4
@@ -491,8 +491,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[RED:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; PRED-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index a60d35d..0cad053 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -159,8 +159,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP15]], [[SUM_07]]
@@ -420,8 +420,8 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP45]], [[SUM_07]]
@@ -673,9 +673,9 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-TF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP22]], [[ADD_PHI2]]
@@ -918,8 +918,8 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1148,8 +1148,8 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP18]], 0.000000e+00
@@ -1623,8 +1623,8 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1945,8 +1945,8 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 51efbe9..d32b898 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -114,7 +114,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; PRED-NEXT: br label [[LOOP:%.*]]
; PRED: loop:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index f4982e6..d6f8b8e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -48,8 +48,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
@@ -101,8 +101,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
@@ -171,8 +171,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
@@ -223,8 +223,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
@@ -298,8 +298,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5
@@ -362,8 +362,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-IN-LOOP: for.body:
-; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
+; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
; CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
index 473fabf..e214e82 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
@@ -1,17 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt -S -passes=loop-vectorize < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
-; CHECK-LABEL: @widen_extractvalue(
-; CHECK: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV:%.*]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[EXTRACT1:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT1]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK: [[ADD:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[DOTSPLAT2]]
-; CHECK: vector.body:
+; CHECK-LABEL: define void @widen_extractvalue(
+; CHECK-SAME: ptr [[DST:%.*]], { i64, i64 } [[SV:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 2
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1000, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP2]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2
+; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
+; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
+; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { i64, i64 } [[SV]], 1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT2]], [[BROADCAST_SPLAT2]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
+; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 1000, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %loop.body
@@ -38,4 +64,3 @@ attributes #0 = { "target-features"="+sve" }
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.interleave.count", i32 1}
!5 = !{!"llvm.loop.vectorize.enable", i1 true}
-
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 9929f35..5c6328e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -35,7 +35,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP15]]
@@ -48,6 +47,7 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT: store <vscale x 2 x i8> [[TMP20]], ptr [[TMP18]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -119,7 +119,6 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP12]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]]
@@ -128,6 +127,7 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT: store <vscale x 2 x i8> [[TMP17]], ptr [[TMP15]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 6947884..2c88e0e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -239,7 +239,6 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP10]]
@@ -250,6 +249,7 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[TMP12]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: store <vscale x 2 x ptr> [[VECTOR_GEP]], ptr [[NEXT_GEP]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -313,7 +313,6 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr %
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[PTR:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl <vscale x 2 x i64> [[TMP4]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP5]]
@@ -321,6 +320,7 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr %
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i64 0
; CHECK-NEXT: call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> zeroinitializer, ptr [[TMP7]], i32 2, <vscale x 2 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
index a11896a..124abc6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
@@ -80,7 +80,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA-NEXT: br label [[WHILE_BODY:%.*]]
; DATA: while.body:
-; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -127,7 +127,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_NO_LANEMASK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_NO_LANEMASK: while.body:
-; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -169,7 +169,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_AND_CONTROL: while.body:
-; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -216,7 +216,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_AND_CONTROL_NO_RT_CHECK: while.body:
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
index d0ea828..bd6a027 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
@@ -116,7 +116,7 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
; VF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF4-NEXT: br label %[[LOOP:.*]]
; VF4: [[LOOP]]:
-; VF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1
; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]]
; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
index 66bb80b..59e65f7 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -30,7 +30,7 @@ define void @test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
@@ -218,7 +218,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
@@ -280,7 +280,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index 83cb325..fd94673 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -40,8 +40,8 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
@@ -120,8 +120,8 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
@@ -195,8 +195,8 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]]
@@ -260,8 +260,8 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 1, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]]
@@ -325,8 +325,8 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ -1, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = and i32 [[TMP7]], [[R_07]]
@@ -390,8 +390,8 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]]
@@ -455,8 +455,8 @@ define i32 @xor_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]]
@@ -520,8 +520,8 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]]
@@ -585,8 +585,8 @@ define float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 0f4d40f..8fbeff5 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -393,7 +393,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 5f13089..2b93668 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -45,7 +45,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
@@ -74,16 +74,50 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: define void @test_wide_ptr_induction(
; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[B]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 2 x i64> [[TMP5]], splat (i64 8)
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[VECTOR_GEP]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP10]]
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[B]], [[ENTRY]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[VECTOR_BODY]] ], [ [[B]], [[VECTOR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw nsw i64 [[EVL_BASED_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -109,4 +143,6 @@ for.cond.cleanup:
; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 6e2434a..1addff6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -151,8 +151,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
@@ -204,8 +204,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
@@ -372,8 +372,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
@@ -419,8 +419,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 0a87257..32cb426 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -146,7 +146,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]]
; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1
@@ -379,8 +379,8 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
index 0afe04e..07a7b7b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
@@ -1,29 +1,36 @@
-; RUN: opt -passes=loop-vectorize \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
-; RUN: -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s
; REQUIRES: asserts
-; Make sure we do not vectorize a loop with a widened pointer induction.
-define void @test_wide_pointer_induction(ptr noalias %a, i64 %N) {
+; For %for.1, we are fine initially, because the previous value %for.1.next dominates the
+; user of %for.1. But for %for.2, we have to sink the user of %for.2 (%for.1.next) past the
+; previous value %for.2.next. This, however, breaks the condition we have for %for.1. We
+; cannot fix both first-order recurrences, so we cannot vectorize the loop.
+;
+; Make sure we don't compute costs if there are no vector VPlans.
+
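+; A minimal sketch of the conflict (the names mirror the test IR below): each recurrence
+; update reads the other recurrence's phi, so neither user can be sunk past its own
+; previous value without breaking the other recurrence's dominance condition:
+;
+;   %for.1 = phi i32 [ %for.1.next, %for.body ], [ 20, %entry ]
+;   %for.2 = phi i32 [ %for.2.next, %for.body ], [ 11, %entry ]
+;   %for.1.next = add nsw i32 %for.2, 1  ; user of %for.2, needs to sink below %for.2.next
+;   %for.2.next = shl i32 %for.1, 24     ; user of %for.1, must stay below %for.1.next
+;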
; CHECK-NOT: LV: Vector loop of width {{.+}} costs:
;
-; CHECK: define void @test_wide_pointer_induction(
+; CHECK: define i32 @test(
; CHECK-NOT: vector.body
;
+define i32 @test(i32 %N) {
entry:
- br label %loop
+ br label %for.body
-loop:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %iv.ptr = phi ptr [ %a, %entry ], [ %iv.ptr.next, %loop ]
- %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
- store ptr %iv.ptr, ptr %arrayidx, align 8
- %iv.next = add nuw nsw i64 %iv, 1
- %iv.ptr.next = getelementptr i64, ptr %iv.ptr, i32 1
- %exitcond.not = icmp eq i64 %iv.next, %N
- br i1 %exitcond.not, label %exit, label %loop
+for.body: ; preds = %for.body.preheader, %for.body
+ %iv = phi i32 [ %inc, %for.body ], [ 10, %entry ]
+ %for.1 = phi i32 [ %for.1.next, %for.body ], [ 20, %entry ]
+ %for.2 = phi i32 [ %for.2.next, %for.body ], [ 11, %entry ]
+ %for.1.next = add nsw i32 %for.2, 1
+ %for.2.next = shl i32 %for.1, 24
+ %inc = add nsw i32 %iv, 1
+ %exitcond = icmp eq i32 %inc, %N
+ br i1 %exitcond, label %for.cond1.for.end_crit_edge, label %for.body
-exit:
- ret void
+for.cond1.for.end_crit_edge: ; preds = %for.body
+ %add.lcssa = phi i32 [ %for.1.next, %for.body ]
+ %sext.lcssa = phi i32 [ %for.2.next, %for.body ]
+ %res = add i32 %add.lcssa, %sext.lcssa
+ ret i32 %res
}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index 01df436..d41d47a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -58,7 +58,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_COND1:%.*]]
; CHECK: for.cond:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index ed50796..c037b70 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -41,7 +41,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
@@ -106,7 +106,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -172,8 +172,8 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -238,7 +238,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -296,7 +296,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -417,7 +417,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 9e492c6..df907dc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -170,7 +170,6 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
@@ -181,6 +180,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -753,13 +753,11 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP18:%.*]] = mul <vscale x 4 x i64> [[TMP19]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
@@ -767,7 +765,9 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
+; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]]
+; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index ce2b790..2be74e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -1330,7 +1330,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index d02d53b..76a830a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -57,8 +57,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3
@@ -108,8 +108,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
@@ -285,8 +285,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3
@@ -339,8 +339,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
@@ -537,8 +537,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
@@ -597,8 +597,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
@@ -804,8 +804,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
+; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
@@ -867,8 +867,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
+; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index ae047f5..a216aa8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -45,7 +45,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -166,7 +166,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -286,7 +286,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -406,7 +406,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index 987f946..f92bf5a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -53,8 +53,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 33, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]]
@@ -192,9 +192,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT3:%.*]] = phi i32 [ 22, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT3]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]]
@@ -353,10 +353,10 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT6:%.*]] = phi i32 [ 11, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT5]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT6]], %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]]
@@ -666,8 +666,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 33, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]]
; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 2aeb1d0..da5aed9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -51,7 +51,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV1]]
; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index 3e23df7..433d1e4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
@@ -259,8 +259,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
@@ -367,8 +367,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
@@ -475,8 +475,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
@@ -583,8 +583,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]]
@@ -694,8 +694,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]]
@@ -805,8 +805,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]]
@@ -916,8 +916,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]]
@@ -1027,8 +1027,8 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
@@ -1243,8 +1243,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]]
@@ -1356,8 +1356,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP17]], [[RDX]]
@@ -1687,8 +1687,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1807,8 +1807,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3
@@ -1924,8 +1924,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 8d987a9..c5d2739 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -50,7 +50,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0
; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index d474a03..62a4f73 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -39,7 +39,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY1:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]]
; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 06c6bfe..296405d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -44,7 +44,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
@@ -113,7 +113,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
@@ -182,7 +182,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index 5f407fc..e06bbe9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -43,7 +43,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 59d1370..775d9ca 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -43,8 +43,8 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index 2d5718b..464667d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
@@ -262,8 +262,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
@@ -373,8 +373,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
@@ -484,8 +484,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
@@ -597,8 +597,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
@@ -715,8 +715,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]]
@@ -833,8 +833,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]]
@@ -951,8 +951,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]]
@@ -1067,8 +1067,8 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
@@ -1287,8 +1287,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]]
@@ -1405,8 +1405,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]]
@@ -1739,8 +1739,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1859,8 +1859,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3
@@ -1976,8 +1976,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index e2db28d..397cb95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -57,8 +57,8 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]]
; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
@@ -205,8 +205,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
+; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]]
; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
@@ -388,7 +388,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1
; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 1c78b25..2ec23b91 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -44,7 +44,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
@@ -375,7 +375,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
@@ -483,7 +483,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 687a2e7..ab05166 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -50,7 +50,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]]
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]]
; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 24649729..034b767 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -179,7 +179,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[TMP4:%.*]] = or i8 23, [[X]]
; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1
; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index dfdc893..01edeed 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -36,7 +36,7 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 568aa95..d97e93d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -117,7 +117,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -439,7 +439,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
; TF-SCALABLE: [[DO_LOAD]]:
@@ -589,7 +589,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -726,7 +726,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -890,7 +890,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -1068,7 +1068,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
; TF-SCALABLE: [[DO_STORE]]:
@@ -1216,7 +1216,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
index 7c1ec9a..d93a5c0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
@@ -27,7 +27,7 @@ define void @foo(ptr %arg) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]]
; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 85116fe..d3c3c6b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
index 082e326..0fb4655 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
@@ -42,7 +42,7 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
index 02a876a..d7cc6f0 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
@@ -96,7 +96,7 @@ define void @test(ptr %p, i40 %a) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24
; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
index e0fc73f..4e46a29 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
@@ -69,8 +69,8 @@ define void @func_21() {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4
; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
index c61b1b9..37493d1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
@@ -117,7 +117,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
; CHECK: then.1:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
@@ -220,7 +220,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
; CHECK: then.1:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
index 85b475c..1a3ff6c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -1055,8 +1055,8 @@ define i64 @live_in_known_1_via_scev() {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 3, [[PH]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
index 1249df4..ee85e0e 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
@@ -46,8 +46,8 @@ define i1 @fn(ptr %nno) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY20:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ]
-; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ]
+; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ]
; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
index fe2ad66..07b130b 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
@@ -507,8 +507,8 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 4, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
index fcd94f4..a66800c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
@@ -623,7 +623,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
index 07e2df3..c5ac0ae 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
@@ -35,7 +35,7 @@ define i32 @foo_optsize() #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -72,7 +72,7 @@ define i32 @foo_optsize() #0 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -131,7 +131,7 @@ define i32 @foo_minsize() #1 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -168,7 +168,7 @@ define i32 @foo_minsize() #1 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -379,7 +379,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
@@ -423,7 +423,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[LOOP:%.*]]
; AUTOVF: loop:
-; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
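
; A standalone sketch, with illustrative names, of the decrementing pointer
; induction the tail-folded store checks above match: the pointer steps
; backwards by a fixed 72 bytes per iteration until it reaches %end.
define void @ptr_iv_sketch(ptr %start, ptr %end) {
entry:
  br label %loop

loop:
  %ptr.iv = phi ptr [ %start, %entry ], [ %ptr.iv.next, %loop ]
  store ptr null, ptr %ptr.iv, align 8
  ; negative GEP offset: walk down through memory one 72-byte element at a time
  %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 -72
  %ec = icmp eq ptr %ptr.iv.next, %end
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}
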
diff --git a/llvm/test/Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll b/llvm/test/Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll
index 02d48cb..7a59884 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/outer_loop_test1_no_explicit_vect_width.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -mtriple x86_64 < %s | FileCheck %s
; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -mtriple x86_64 -mattr=+avx < %s | FileCheck %s --check-prefix=AVX
; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -mtriple x86_64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
@@ -18,68 +19,78 @@
; }
;
-; CHECK-LABEL: vector.ph:
-; CHECK: %[[SplatVal:.*]] = insertelement <4 x i32> poison, i32 %n, i64 0
-; CHECK: %[[Splat:.*]] = shufflevector <4 x i32> %[[SplatVal]], <4 x i32> poison, <4 x i32> zeroinitializer
-
-; CHECK-LABEL: vector.body:
-; CHECK: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
-; CHECK: %[[VecInd:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
-; CHECK: %[[AAddr:.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> %[[VecInd]]
-; CHECK: %[[VecIndTr:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[VecIndTr]], <4 x ptr> %[[AAddr]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[VecIndTr2:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: %[[StoreVal:.*]] = add nsw <4 x i32> %[[VecIndTr2]], %[[Splat]]
-; CHECK: br label %[[InnerLoop:.+]]
-
-; CHECK: [[InnerLoop]]:
-; CHECK: %[[InnerPhi:.*]] = phi <4 x i64> [ zeroinitializer, %vector.body ], [ %[[InnerPhiNext:.*]], %[[InnerLoop]] ]
-; CHECK: %[[AAddr2:.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> %[[InnerPhi]], <4 x i64> %[[VecInd]]
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[StoreVal]], <4 x ptr> %[[AAddr2]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[InnerPhiNext]] = add nuw nsw <4 x i64> %[[InnerPhi]], splat (i64 1)
-; CHECK: %[[VecCond:.*]] = icmp eq <4 x i64> %[[InnerPhiNext]], splat (i64 8)
-; CHECK: %[[InnerCond:.*]] = extractelement <4 x i1> %[[VecCond]], i32 0
-; CHECK: br i1 %[[InnerCond]], label %[[ForInc]], label %[[InnerLoop]]
-
-; CHECK: [[ForInc]]:
-; CHECK: %[[IndNext]] = add nuw i64 %[[Ind]], 4
-; CHECK: %[[VecIndNext]] = add <4 x i64> %[[VecInd]], splat (i64 4)
-; CHECK: %[[Cmp:.*]] = icmp eq i64 %[[IndNext]], 8
-; CHECK: br i1 %[[Cmp]], label %middle.block, label %vector.body
-
-; AVX-LABEL: vector.ph:
-; AVX: %[[SplatVal:.*]] = insertelement <8 x i32> poison, i32 %n, i64 0
-; AVX: %[[Splat:.*]] = shufflevector <8 x i32> %[[SplatVal]], <8 x i32> poison, <8 x i32> zeroinitializer
-
-; AVX-LABEL: vector.body:
-; AVX: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
-; AVX: %[[VecInd:.*]] = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
-; AVX: %[[AAddr:.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <8 x i64> %[[VecInd]]
-; AVX: %[[VecIndTr:.*]] = trunc <8 x i64> %[[VecInd]] to <8 x i32>
-; AVX: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %[[VecIndTr]], <8 x ptr> %[[AAddr]], i32 4, <8 x i1> splat (i1 true))
-; AVX: %[[VecIndTr2:.*]] = trunc <8 x i64> %[[VecInd]] to <8 x i32>
-; AVX: %[[StoreVal:.*]] = add nsw <8 x i32> %[[VecIndTr2]], %[[Splat]]
-; AVX: br label %[[InnerLoop:.+]]
-
-; AVX: [[InnerLoop]]:
-; AVX: %[[InnerPhi:.*]] = phi <8 x i64> [ zeroinitializer, %vector.body ], [ %[[InnerPhiNext:.*]], %[[InnerLoop]] ]
-; AVX: %[[AAddr2:.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <8 x i64> %[[InnerPhi]], <8 x i64> %[[VecInd]]
-; AVX: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %[[StoreVal]], <8 x ptr> %[[AAddr2]], i32 4, <8 x i1> splat (i1 true))
-; AVX: %[[InnerPhiNext]] = add nuw nsw <8 x i64> %[[InnerPhi]], splat (i64 1)
-; AVX: %[[VecCond:.*]] = icmp eq <8 x i64> %[[InnerPhiNext]], splat (i64 8)
-; AVX: %[[InnerCond:.*]] = extractelement <8 x i1> %[[VecCond]], i32 0
-; AVX: br i1 %[[InnerCond]], label %[[ForInc]], label %[[InnerLoop]]
-
-; AVX: [[ForInc]]:
-; AVX: %[[IndNext]] = add nuw i64 %[[Ind]], 8
-; AVX: %[[VecIndNext]] = add <8 x i64> %[[VecInd]], splat (i64 8)
-; AVX: br i1 true, label %middle.block, label %vector.body
-
@arr2 = external global [8 x i32], align 16
@arr = external global [8 x [8 x i32]], align 16
; Function Attrs: norecurse nounwind uwtable
define void @foo(i32 %n) {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP1]], <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP8]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: br label %[[FOR_BODY31:.*]]
+; CHECK: [[FOR_BODY31]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR_BODY31]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> [[VEC_PHI]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP2]], <4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1)
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 8)
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR_BODY31]]
+; CHECK: [[VECTOR_LATCH]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, [[FOR_END10:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
+; AVX-LABEL: define void @foo(
+; AVX-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; AVX-NEXT: [[ENTRY:.*:]]
+; AVX-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; AVX: [[VECTOR_PH]]:
+; AVX-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[N]], i64 0
+; AVX-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
+; AVX-NEXT: br label %[[VECTOR_BODY:.*]]
+; AVX: [[VECTOR_BODY]]:
+; AVX-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; AVX-NEXT: [[VEC_IND:%.*]] = phi <8 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; AVX-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <8 x i64> [[VEC_IND]]
+; AVX-NEXT: [[TMP1:%.*]] = trunc <8 x i64> [[VEC_IND]] to <8 x i32>
+; AVX-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP1]], <8 x ptr> [[TMP0]], i32 4, <8 x i1> splat (i1 true))
+; AVX-NEXT: [[TMP7:%.*]] = trunc <8 x i64> [[VEC_IND]] to <8 x i32>
+; AVX-NEXT: [[TMP2:%.*]] = add nsw <8 x i32> [[TMP7]], [[BROADCAST_SPLAT]]
+; AVX-NEXT: br label %[[FOR_BODY31:.*]]
+; AVX: [[FOR_BODY31]]:
+; AVX-NEXT: [[VEC_PHI:%.*]] = phi <8 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR_BODY31]] ]
+; AVX-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <8 x i64> [[VEC_PHI]], <8 x i64> [[VEC_IND]]
+; AVX-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP2]], <8 x ptr> [[TMP3]], i32 4, <8 x i1> splat (i1 true))
+; AVX-NEXT: [[TMP4]] = add nuw nsw <8 x i64> [[VEC_PHI]], splat (i64 1)
+; AVX-NEXT: [[TMP5:%.*]] = icmp eq <8 x i64> [[TMP4]], splat (i64 8)
+; AVX-NEXT: [[TMP6:%.*]] = extractelement <8 x i1> [[TMP5]], i32 0
+; AVX-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR_BODY31]]
+; AVX: [[VECTOR_LATCH]]:
+; AVX-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; AVX-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], splat (i64 8)
+; AVX-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; AVX: [[MIDDLE_BLOCK]]:
+; AVX-NEXT: br i1 true, [[FOR_END10:label %.*]], label %[[SCALAR_PH]]
+; AVX: [[SCALAR_PH]]:
+;
entry:
br label %for.body
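
; The outer-loop checks above match llvm.masked.scatter; here is a minimal,
; self-contained use of that intrinsic, with illustrative names. Lane i of
; %vals is stored to lane i of %ptrs wherever the mask bit is set; an all-true
; mask writes every lane.
declare void @llvm.masked.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, i32 immarg, <4 x i1>)

define void @scatter_sketch(<4 x ptr> %ptrs, <4 x i32> %vals) {
entry:
  call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %vals, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 true))
  ret void
}
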
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
index 08adfdd..11c5e39 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
@@ -44,7 +44,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 99, [[BB5:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1
; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof [[PROF5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
index 440f6e1..4145967 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
@@ -53,7 +53,7 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr
; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]]
; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]]
; CHECK: [[LOOP_3]]:
-; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]]
; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1
; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
index 5e35c4a..9a81fae 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
@@ -35,7 +35,7 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
@@ -99,7 +99,7 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
@@ -181,8 +181,8 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B,
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index f7eba42..a926ff4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -146,7 +146,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
index 59f2925..e7fa655 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
@@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
index e9d85c2..f4fe120 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
@@ -79,7 +79,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
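
; The checks above carry !llvm.access.group metadata; a minimal sketch of that
; annotation, with illustrative names: both accesses are tagged with the same
; distinct access group, which loop metadata can then mark as parallel.
define void @access_group_sketch(ptr %b) {
entry:
  %l = load i8, ptr %b, align 1, !llvm.access.group !0
  store i8 %l, ptr %b, align 1, !llvm.access.group !0
  ret void
}

!0 = distinct !{}
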
diff --git a/llvm/test/Transforms/LoopVectorize/assume.ll b/llvm/test/Transforms/LoopVectorize/assume.ll
index c81f48f..ff83a612 100644
--- a/llvm/test/Transforms/LoopVectorize/assume.ll
+++ b/llvm/test/Transforms/LoopVectorize/assume.ll
@@ -1,10 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt < %s -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S | FileCheck %s
define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
-; CHECK-LABEL: @test1(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr {{.*}}, align 4
-; CHECK: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr {{.*}}, align 4
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP0]], i32 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+02)
; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+02)
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
@@ -15,6 +24,19 @@ define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b)
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP5]])
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP6]])
+; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 2
+; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[TMP10]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP9]], ptr [[TMP11]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1600
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br [[FOR_END:label %.*]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
@@ -39,27 +61,47 @@ declare void @llvm.assume(i1) #0
attributes #0 = { nounwind willreturn }
-%struct.data = type { ptr, ptr }
-
-define void @test2(ptr nocapture readonly %d) {
-; CHECK-LABEL: @test2(
-; CHECK: entry:
-; CHECK: [[MASKCOND:%.*]] = icmp eq i64 %maskedptr, 0
-; CHECK: [[MASKCOND4:%.*]] = icmp eq i64 %maskedptr3, 0
-; CHECK: vector.body:
-; CHECK: tail call void @llvm.assume(i1 [[MASKCOND]])
+define void @test2(ptr noalias %a, ptr noalias %b) {
+; CHECK-LABEL: define void @test2(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT: [[PTRINT2:%.*]] = ptrtoint ptr [[B]] to i64
+; CHECK-NEXT: [[MASKEDPTR3:%.*]] = and i64 [[PTRINT2]], 31
+; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK: tail call void @llvm.assume(i1 [[MASKCOND4]])
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x float>, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
-; CHECK: for.body:
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1600
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br [[FOR_END:label %.*]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
- %b = getelementptr inbounds %struct.data, ptr %d, i64 0, i32 1
- %0 = load ptr, ptr %b, align 8
- %ptrint = ptrtoint ptr %0 to i64
+ %ptrint = ptrtoint ptr %a to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
- %1 = load ptr, ptr %d, align 8
- %ptrint2 = ptrtoint ptr %1 to i64
+ %ptrint2 = ptrtoint ptr %b to i64
%maskedptr3 = and i64 %ptrint2, 31
%maskcond4 = icmp eq i64 %maskedptr3, 0
br label %for.body
@@ -68,11 +110,11 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds float, ptr %0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
%2 = load float, ptr %arrayidx, align 4
%add = fadd float %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond4)
- %arrayidx5 = getelementptr inbounds float, ptr %1, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds float, ptr %b, i64 %indvars.iv
store float %add, ptr %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 1599
@@ -87,10 +129,46 @@ for.end: ; preds = %for.body
; in the vector body.
define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias nocapture %b, i32 %n) {
; Check that the vector.body does not contain any assumes.
-; CHECK-LABEL: @predicated_assume(
-; CHECK: vector.body:
-; CHECK-NOT: llvm.assume
-; CHECK: for.body:
+; CHECK-LABEL: define void @predicated_assume(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[CMP15:%.*]] = icmp eq i32 [[N]], 0
+; CHECK-NEXT: br i1 [[CMP15]], [[FOR_COND_CLEANUP:label %.*]], label %[[FOR_BODY_PREHEADER:.*]]
+; CHECK: [[FOR_BODY_PREHEADER]]:
+; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i64> [[VEC_IND]], splat (i64 495616)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <2 x i64> [[STEP_ADD]], splat (i64 495616)
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01)
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <2 x i1> [[TMP2]], <2 x float> splat (float 2.300000e+01), <2 x float> splat (float 4.200000e+01)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 2
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x float>, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x float> [[PREDPHI]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x float> [[PREDPHI1]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 2
+; CHECK-NEXT: store <2 x float> [[TMP5]], ptr [[TMP7]], align 4
+; CHECK-NEXT: store <2 x float> [[TMP6]], ptr [[TMP8]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2)
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_COND_CLEANUP_LOOPEXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
%cmp15 = icmp eq i32 %n, 0
br i1 %cmp15, label %for.cond.cleanup, label %for.body.preheader
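
; A minimal sketch of the alignment-assumption idiom @test2 above builds by
; hand: mask off the low pointer bits and assume the result is zero, telling
; later passes the pointer is 32-byte aligned. Names are illustrative.
declare void @llvm.assume(i1)

define void @assume_align_sketch(ptr %p) {
entry:
  %ptrint = ptrtoint ptr %p to i64
  %low = and i64 %ptrint, 31
  %aligned = icmp eq i64 %low, 0
  tail call void @llvm.assume(i1 %aligned)
  ret void
}
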
diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
index 42d45bd..8ac33a1 100644
--- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
@@ -102,9 +102,9 @@ define void @pr47390(ptr %a) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ]
-; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ]
+; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1
; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]]
diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
index c8cf2ad..9852f53 100644
--- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
@@ -540,3 +540,227 @@ loop.latch:
exit:
ret void
}
+
+; The start access is a SCEV with a non-constant offset, because the IV starts
+; at the variable `iv.start`.
+define void @deref_assumption_loop_access_start_variable(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 range(i64 0, 2000) %iv.start) nofree nosync {
+; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable(
+; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 range(i64 0, 2000) [[IV_START:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[N]], [[IV_START]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP8]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; CHECK: [[PRED_LOAD_IF]]:
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP19]], i32 0
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; CHECK: [[PRED_LOAD_CONTINUE]]:
+; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_IF1]]:
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_CONTINUE2]]:
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP8]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1
+; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
+; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
+; CHECK: [[LOOP_THEN]]:
+; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1
+; CHECK-NEXT: br label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+
+entry:
+ %a = getelementptr i8, ptr %P, i64 16
+ %cmp = icmp slt i64 %iv.start, %N
+ call void @llvm.assume(i1 %cmp)
+ %mul = mul i64 %N, 4
+ %add = add i64 %mul, 16
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ]
+ br label %loop
+
+loop:                                             ; preds = %entry, %loop.latch
+ %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 1
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+loop.then: ; preds = %loop
+ %l.a = load i32, ptr %gep.a, align 1
+ br label %loop.latch
+
+loop.latch: ; preds = %loop.then, %loop
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %term.cond = icmp slt i64 %iv.next, %N
+ br i1 %term.cond, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; Same as the previous test, but `iv.start` carries no range attribute, so it
+; is not known to be non-negative.
+define void @deref_assumption_loop_access_start_variable_unknown_range(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 %iv.start) nofree nosync {
+; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable_unknown_range(
+; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[IV_START:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_START]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 2
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP3]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; CHECK: [[PRED_LOAD_IF]]:
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; CHECK: [[PRED_LOAD_CONTINUE]]:
+; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_IF1]]:
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_CONTINUE2]]:
+; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1
+; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
+; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
+; CHECK: [[LOOP_THEN]]:
+; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1
+; CHECK-NEXT: br label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = getelementptr i8, ptr %P, i64 16
+ %cmp = icmp slt i64 %iv.start, %N
+ call void @llvm.assume(i1 %cmp)
+ %mul = mul i64 %N, 4
+ %add = add i64 %mul, 16
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ]
+ br label %loop
+
+loop:                                             ; preds = %entry, %loop.latch
+ %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 1
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+loop.then: ; preds = %loop
+ %l.a = load i32, ptr %gep.a, align 1
+ br label %loop.latch
+
+loop.latch: ; preds = %loop.then, %loop
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %term.cond = icmp slt i64 %iv.next, %N
+ br i1 %term.cond, label %loop, label %exit
+
+exit:
+ ret void
+}
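
; The two tests above rely on an assume carrying a "dereferenceable" operand
; bundle; a minimal standalone form of that bundle, with illustrative names,
; asserts that %n*4 bytes starting at %p may be accessed speculatively.
declare void @llvm.assume(i1)

define void @deref_bundle_sketch(ptr %p, i64 %n) {
entry:
  %bytes = mul i64 %n, 4
  call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %p, i64 %bytes) ]
  ret void
}
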
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
index 1936b40..d666487 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
@@ -203,7 +203,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
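
; The epilogue-vectorization checks in the next file fold a <4 x i64>
; accumulator into a scalar with llvm.vector.reduce.add; a minimal use of that
; intrinsic, with illustrative names:
declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)

define i64 @reduce_add_sketch(<4 x i64> %acc) {
entry:
  ; horizontal sum of all four lanes
  %sum = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %acc)
  ret i64 %sum
}
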
diff --git a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll
index 2d0d30d..5e97ced 100644
--- a/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/epilog-vectorization-reductions.ll
@@ -477,3 +477,192 @@ for.cond:
for.end:
ret i32 %sub
}
+
+define i64 @test_reduction_with_widen_induction_order_1(ptr %A, i64 %N) {
+; CHECK-LABEL: @test_reduction_with_widen_induction_order_1(
+; CHECK-NEXT: iter.check:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
+; CHECK: vector.main.loop.iter.check:
+; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1]] = add <4 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
+; CHECK-NEXT: store <4 x i64> [[VEC_IND]], ptr [[TMP0]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
+; CHECK: vec.epilog.iter.check:
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK: vec.epilog.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]]
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[BC_RESUME_VAL]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
+; CHECK: vec.epilog.vector.body:
+; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND5:%.*]] = phi <4 x i64> [ [[INDUCTION]], [[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i64> [ [[TMP4]], [[VEC_EPILOG_PH]] ], [ [[TMP6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX4]]
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i64>, ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP6]] = add <4 x i64> [[VEC_PHI6]], [[WIDE_LOAD7]]
+; CHECK-NEXT: store <4 x i64> [[VEC_IND5]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX4]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i64> [[VEC_IND5]], splat (i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK: vec.epilog.middle.block:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]])
+; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[CMP_N10]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
+; CHECK: vec.epilog.scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL11:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX12:%.*]] = phi i64 [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL11]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX12]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV_1]]
+; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP_A]], align 4
+; CHECK-NEXT: [[RED_NEXT]] = add i64 [[RED]], [[L]]
+; CHECK-NEXT: store i64 [[IV_1]], ptr [[GEP_A]], align 4
+; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ]
+ %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
+ %gep.A = getelementptr inbounds i64, ptr %A, i64 %iv.1
+ %l = load i64, ptr %gep.A
+ %red.next = add i64 %red, %l
+ store i64 %iv.1, ptr %gep.A, align 4
+ %iv.1.next = add nuw nsw i64 %iv.1, 1
+ %exitcond = icmp eq i64 %iv.1.next, %N
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret i64 %red.next
+}
+
+; Same as @test_reduction_with_widen_induction_order_1, but with the phi order flipped.
+define i64 @test_reduction_with_widen_induction_order_2(ptr %A, i64 %N) {
+; CHECK-LABEL: @test_reduction_with_widen_induction_order_2(
+; CHECK-NEXT: iter.check:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
+; CHECK: vector.main.loop.iter.check:
+; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1]] = add <4 x i64> [[VEC_PHI]], [[WIDE_LOAD]]
+; CHECK-NEXT: store <4 x i64> [[VEC_IND]], ptr [[TMP0]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
+; CHECK: vec.epilog.iter.check:
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK: vec.epilog.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
+; CHECK-NEXT: [[N_MOD_VF2:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[N_VEC3:%.*]] = sub i64 [[N]], [[N_MOD_VF2]]
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i64> zeroinitializer, i64 [[BC_MERGE_RDX]], i32 0
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[BC_RESUME_VAL]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
+; CHECK: vec.epilog.vector.body:
+; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i64> [ [[TMP4]], [[VEC_EPILOG_PH]] ], [ [[TMP6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND6:%.*]] = phi <4 x i64> [ [[INDUCTION]], [[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT9:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX4]]
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i64>, ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP6]] = add <4 x i64> [[VEC_PHI5]], [[WIDE_LOAD7]]
+; CHECK-NEXT: store <4 x i64> [[VEC_IND6]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX4]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i64> [[VEC_IND6]], splat (i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT8]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK: vec.epilog.middle.block:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]])
+; CHECK-NEXT: [[CMP_N10:%.*]] = icmp eq i64 [[N]], [[N_VEC3]]
+; CHECK-NEXT: br i1 [[CMP_N10]], label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
+; CHECK: vec.epilog.scalar.ph:
+; CHECK-NEXT: [[BC_MERGE_RDX11:%.*]] = phi i64 [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[TMP3]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL12:%.*]] = phi i64 [ [[N_VEC3]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX11]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL12]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV_1]]
+; CHECK-NEXT: [[L:%.*]] = load i64, ptr [[GEP_A]], align 4
+; CHECK-NEXT: [[RED_NEXT]] = add i64 [[RED]], [[L]]
+; CHECK-NEXT: store i64 [[IV_1]], ptr [[GEP_A]], align 4
+; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i64 [[IV_1]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_1_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i64 [ [[RED_NEXT]], [[LOOP]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ], [ [[TMP8]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[RED_NEXT_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
+ %iv.1 = phi i64 [ 0, %entry ], [ %iv.1.next, %loop ]
+ %gep.A = getelementptr inbounds i64, ptr %A, i64 %iv.1
+ %l = load i64, ptr %gep.A
+ %red.next = add i64 %red, %l
+ store i64 %iv.1, ptr %gep.A, align 4
+ %iv.1.next = add nuw nsw i64 %iv.1, 1
+ %exitcond = icmp eq i64 %iv.1.next, %N
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret i64 %red.next
+}
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
index 3adfcf5..db97bdf 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -2750,9 +2750,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -2813,9 +2813,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -2899,9 +2899,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[VAR]]
; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -3113,10 +3113,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
@@ -3194,10 +3194,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
@@ -3316,10 +3316,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[VAR]]
; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
index a0068f0..d6acba5 100644
--- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
+++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
@@ -473,8 +473,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) {
; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; IC4VF4-NEXT: br label %[[LOOP:.*]]
; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1
; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]]
@@ -844,8 +844,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) {
; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; IC4VF4-NEXT: br label %[[LOOP:.*]]
; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1
; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]]
diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll
index 10b2e70..22ebf92 100644
--- a/llvm/test/Transforms/LoopVectorize/loop-form.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll
@@ -84,7 +84,7 @@ define void @bottom_tested(ptr %p, i32 %n) {
; TAILFOLD-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; TAILFOLD-NEXT: br label [[FOR_COND:%.*]]
; TAILFOLD: for.cond:
-; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
+; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64
; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]]
; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
index c9066f2..72bc181 100644
--- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
@@ -74,7 +74,7 @@ define void @maxvf3() {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[J:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]]
; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8
; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]]
diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll
index f0d026b..b9ee09e 100644
--- a/llvm/test/Transforms/LoopVectorize/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/optsize.ll
@@ -626,6 +626,7 @@ define i32 @pr45526_pgso() !prof !14 {
; NPGSO-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; NPGSO: [[MIDDLE_BLOCK]]:
; NPGSO-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; NPGSO-NEXT: br label %[[SCALAR_PH]]
; NPGSO: [[SCALAR_PH]]:
; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 508, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; NPGSO-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 5, %[[ENTRY]] ]
@@ -698,7 +699,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4
@@ -747,7 +748,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; PGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; PGSO-NEXT: br label %[[FOR_BODY:.*]]
; PGSO: [[FOR_BODY]]:
-; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
@@ -796,7 +797,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; NPGSO-NEXT: br label %[[FOR_BODY:.*]]
; NPGSO: [[FOR_BODY]]:
-; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll b/llvm/test/Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll
index 1cf410c..32b1fc4 100644
--- a/llvm/test/Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll
+++ b/llvm/test/Transforms/LoopVectorize/outer-loop-vec-phi-predecessor-order.ll
@@ -35,7 +35,7 @@ define void @test(ptr %src, i64 %n) {
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i32 0
; CHECK-NEXT: br i1 [[TMP4]], label [[LOOP_2_LATCH4]], label [[LOOP_32]]
-; CHECK: loop.2.latch4:
+; CHECK: loop.2.latch3:
; CHECK-NEXT: [[TMP5]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1)
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <4 x i64> [[TMP5]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP6]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
index 80e7de7..6bc2f38 100644
--- a/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
+++ b/llvm/test/Transforms/LoopVectorize/outer_loop_test1.ll
@@ -1,3 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
+; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -verify-loop-info -verify-dom-info < %s | FileCheck %s
+
; extern int arr[8][8];
; extern int arr2[8];
;
@@ -13,41 +16,46 @@
; }
; }
;
-; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -verify-loop-info -verify-dom-info < %s | FileCheck %s
-; CHECK-LABEL: vector.ph:
-; CHECK: %[[SplatVal:.*]] = insertelement <4 x i32> poison, i32 %n, i64 0
-; CHECK: %[[Splat:.*]] = shufflevector <4 x i32> %[[SplatVal]], <4 x i32> poison, <4 x i32> zeroinitializer
-
-; CHECK-LABEL: vector.body:
-; CHECK: %[[Ind:.*]] = phi i64 [ 0, %vector.ph ], [ %[[IndNext:.*]], %[[ForInc:.*]] ]
-; CHECK: %[[VecInd:.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %[[VecIndNext:.*]], %[[ForInc]] ]
-; CHECK: %[[AAddr:.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> %[[VecInd]]
-; CHECK: %[[VecIndTr:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[VecIndTr]], <4 x ptr> %[[AAddr]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[VecIndTr2:.*]] = trunc <4 x i64> %[[VecInd]] to <4 x i32>
-; CHECK: %[[StoreVal:.*]] = add nsw <4 x i32> %[[VecIndTr2]], %[[Splat]]
-; CHECK: br label %[[InnerLoop:.+]]
-
-; CHECK: [[InnerLoop]]:
-; CHECK: %[[InnerPhi:.*]] = phi <4 x i64> [ zeroinitializer, %vector.body ], [ %[[InnerPhiNext:.*]], %[[InnerLoop]] ]
-; CHECK: %[[AAddr2:.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> %[[InnerPhi]], <4 x i64> %[[VecInd]]
-; CHECK: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %[[StoreVal]], <4 x ptr> %[[AAddr2]], i32 4, <4 x i1> splat (i1 true))
-; CHECK: %[[InnerPhiNext]] = add nuw nsw <4 x i64> %[[InnerPhi]], splat (i64 1)
-; CHECK: %[[VecCond:.*]] = icmp eq <4 x i64> %[[InnerPhiNext]], splat (i64 8)
-; CHECK: %[[InnerCond:.*]] = extractelement <4 x i1> %[[VecCond]], i32 0
-; CHECK: br i1 %[[InnerCond]], label %[[ForInc]], label %[[InnerLoop]]
-
-; CHECK: [[ForInc]]:
-; CHECK: %[[IndNext]] = add nuw i64 %[[Ind]], 4
-; CHECK: %[[VecIndNext]] = add <4 x i64> %[[VecInd]], splat (i64 4)
-; CHECK: %[[Cmp:.*]] = icmp eq i64 %[[IndNext]], 8
-; CHECK: br i1 %[[Cmp]], label %middle.block, label %vector.body
@arr2 = external global [8 x i32], align 16
@arr = external global [8 x [8 x i32]], align 16
; Function Attrs: norecurse nounwind uwtable
define void @foo(i32 %n) {
+; CHECK-LABEL: define void @foo(
+; CHECK-SAME: i32 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_LATCH:.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_LATCH]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [8 x i32], ptr @arr2, i64 0, <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP1]], <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP8:%.*]] = trunc <4 x i64> [[VEC_IND]] to <4 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP8]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: br label %[[FOR_BODY31:.*]]
+; CHECK: [[FOR_BODY31]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_BODY]] ], [ [[TMP4:%.*]], %[[FOR_BODY31]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [8 x [8 x i32]], ptr @arr, i64 0, <4 x i64> [[VEC_PHI]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[TMP2]], <4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true))
+; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1)
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 8)
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[VECTOR_LATCH]], label %[[FOR_BODY31]]
+; CHECK: [[VECTOR_LATCH]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 true, [[FOR_END10:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
index 69931a0..d2c53f4 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
@@ -231,7 +231,6 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4
; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0
; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
@@ -240,6 +239,7 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[OFFSET_IDX]]
; STRIDED-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; STRIDED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
index c044cc0..bda91ba 100644
--- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
@@ -62,7 +62,7 @@ define void @pr45679(ptr %A) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -124,7 +124,7 @@ define void @pr45679(ptr %A) {
; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; VF2UF2-NEXT: br label [[LOOP:%.*]]
; VF2UF2: loop:
-; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -181,7 +181,7 @@ define void @pr45679(ptr %A) {
; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; VF1UF4-NEXT: br label [[LOOP:%.*]]
; VF1UF4: loop:
-; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -261,7 +261,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
@@ -328,7 +328,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VF2UF2-NEXT: br label [[FOR_BODY:%.*]]
; VF2UF2: for.body:
-; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8
@@ -390,7 +390,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VF1UF4-NEXT: br label [[FOR_BODY:%.*]]
; VF1UF4: for.body:
-; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
index d4a6aed..7d6667c 100644
--- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
@@ -36,7 +36,7 @@ define void @test(i16 %x, i64 %y, ptr %ptr) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8
; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
index 77794dc..19c9ccc 100644
--- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
@@ -67,8 +67,8 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1
; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0
; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
index ffe118b..90caee3 100644
--- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
+++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
@@ -63,7 +63,7 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
@@ -181,7 +181,7 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
@@ -253,7 +253,7 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]]
@@ -324,7 +324,7 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: br label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
index 358293f..8291164 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-assume.ll
@@ -1,14 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt < %s -scalable-vectorization=on -force-target-supports-scalable-vectors=true -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S | FileCheck %s
define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
-; CHECK-LABEL: @test1(
-; CHECK: vector.body:
-; CHECK: [[E1:%.*]] = extractelement <vscale x 2 x float> {{.+}}, i32 0
-; CHECK-NEXT: [[FCMP1:%.*]] = fcmp ogt float [[E1]]
-; CHECK-NEXT: [[E2:%.*]] = extractelement <vscale x 2 x float> {{.+}}, i32 0
-; CHECK-NEXT: [[FCMP2:%.*]] = fcmp ogt float [[E2]]
+; CHECK-LABEL: define void @test1(
+; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1600, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x float>, ptr [[TMP9]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x float> [[WIDE_LOAD]], i32 0
+; CHECK-NEXT: [[FCMP1:%.*]] = fcmp ogt float [[TMP10]], 1.000000e+02
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x float> [[WIDE_LOAD1]], i32 0
+; CHECK-NEXT: [[FCMP2:%.*]] = fcmp ogt float [[TMP12]], 1.000000e+02
; CHECK-NEXT: tail call void @llvm.assume(i1 [[FCMP1]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[FCMP2]])
+; CHECK-NEXT: [[TMP14:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP15:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1600, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_END:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
@@ -35,16 +73,54 @@ attributes #0 = { nounwind willreturn }
%struct.data = type { ptr, ptr }
-define void @test2(ptr %a, ptr %b) {
-; CHECK-LABEL: @test2(
-; CHECK: entry:
-; CHECK: [[MASKCOND:%.*]] = icmp eq i64 %ptrint1, 0
-; CHECK: [[MASKCOND4:%.*]] = icmp eq i64 %ptrint2, 0
-; CHECK: vector.body:
-; CHECK: tail call void @llvm.assume(i1 [[MASKCOND]])
+define void @test2(ptr %a, ptr noalias %b) {
+; CHECK-LABEL: define void @test2(
+; CHECK-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[PTRINT1]], 0
+; CHECK-NEXT: [[PTRINT2:%.*]] = ptrtoint ptr [[B]] to i64
+; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[PTRINT2]], 0
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1600, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP7]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
-; CHECK: tail call void @llvm.assume(i1 [[MASKCOND4]])
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP12]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x float>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
+; CHECK-NEXT: [[TMP15:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD3]], splat (float 1.000000e+00)
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1600, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_END:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
%ptrint1 = ptrtoint ptr %a to i64
%maskcond = icmp eq i64 %ptrint1, 0
@@ -75,10 +151,58 @@ for.end: ; preds = %for.body
; in the vector body.
define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias nocapture %b, i64 %n) {
; Check that the vector.body does not contain any assumes.
-; CHECK-LABEL: @predicated_assume(
-; CHECK: vector.body:
-; CHECK-NOT: llvm.assume
-; CHECK: for.body:
+; CHECK-LABEL: define void @predicated_assume(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 2 x i64> [[VEC_IND]], splat (i64 495616)
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i64> [[STEP_ADD]], splat (i64 495616)
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x float> splat (float 2.300000e+01), <vscale x 2 x float> splat (float 4.200000e+01)
+; CHECK-NEXT: [[PREDPHI1:%.*]] = select <vscale x 2 x i1> [[TMP10]], <vscale x 2 x float> splat (float 2.300000e+01), <vscale x 2 x float> splat (float 4.200000e+01)
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 2
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[TMP13]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP11]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x float>, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = fmul <vscale x 2 x float> [[PREDPHI]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP16:%.*]] = fmul <vscale x 2 x float> [[PREDPHI1]], [[WIDE_LOAD2]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 2
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP19]]
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP17]], align 4
+; CHECK-NEXT: store <vscale x 2 x float> [[TMP16]], ptr [[TMP20]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_COND_CLEANUP:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
index 8e272de..a3a4c29 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
@@ -34,7 +34,7 @@ define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
index b2acc64..77f2fc5 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
@@ -96,17 +96,17 @@ define void @integer_induction_wraps_scev_predicate_known(i32 %x, ptr %call, ptr
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
-; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP3]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]]
-; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
index cfc9bb2..03b3ff2 100644
--- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
@@ -42,8 +42,8 @@ define i32 @test(i64 %N, i32 %x) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[EXTRA_ITER]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10
; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
@@ -98,8 +98,8 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1
; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
index bf86cbd..6052224 100644
--- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
@@ -47,8 +47,8 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[PH]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4
; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]]
; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
index eefa3da..e7b243e 100644
--- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
@@ -29,8 +29,8 @@ define float @pr70988() {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
@@ -64,8 +64,8 @@ define float @pr70988() {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
@@ -133,8 +133,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
@@ -185,8 +185,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
; CHECK-ALM-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
@@ -243,8 +243,8 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
@@ -279,8 +279,8 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
index 3cf8b3f..9f33db8 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
@@ -58,7 +58,7 @@ define i32 @test(ptr %vf1, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
index efc2b8d..ac15787 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
@@ -38,7 +38,7 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -99,7 +99,7 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -160,7 +160,7 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -221,7 +221,7 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -282,7 +282,7 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -343,7 +343,7 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -404,7 +404,7 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -465,7 +465,7 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -526,7 +526,7 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i256 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i256 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
index 222c1ee..6f4bb1d 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
@@ -59,7 +59,7 @@ define void @tail_fold_switch(ptr %dst, i32 %0) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [
; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]]
; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
index 13d5be1..d39a146 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -60,7 +60,7 @@ define void @VF1-VPlanExe(ptr %dst) {
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -140,7 +140,7 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH]] ]
; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8
; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]]
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
index 85cf925..a35e763 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
@@ -302,7 +302,7 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L]], 10
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index 59c76ae..983f327 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -224,7 +224,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF8UF1-NEXT: br label %[[LOOP:.*]]
; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -368,7 +368,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF8UF2-NEXT: br label %[[LOOP:.*]]
; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -511,7 +511,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF16UF1-NEXT: br label %[[LOOP:.*]]
; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -797,7 +797,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF8UF1-NEXT: br label %[[LOOP:.*]]
; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
@@ -994,7 +994,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF8UF2-NEXT: br label %[[LOOP:.*]]
; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1
@@ -1190,7 +1190,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF16UF1-NEXT: br label %[[LOOP:.*]]
; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-outer-loop.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-outer-loop.ll
index 6804817..20676f3 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-outer-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-outer-loop.ll
@@ -13,14 +13,14 @@ define void @foo(i64 %n) {
; CHECK-NEXT: Successor(s): outer.header
; CHECK-EMPTY:
; CHECK-NEXT: outer.header:
-; CHECK-NEXT: WIDEN-PHI ir<%outer.iv> = phi [ ir<%outer.iv.next>, outer.latch ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR ir<%outer.iv> = phi [ ir<%outer.iv.next>, outer.latch ], [ ir<0>, ir-bb<entry> ]
; CHECK-NEXT: EMIT ir<%gep.1> = getelementptr ir<@arr2>, ir<0>, ir<%outer.iv>
; CHECK-NEXT: EMIT store ir<%outer.iv>, ir<%gep.1>
; CHECK-NEXT: EMIT ir<%add> = add ir<%outer.iv>, ir<%n>
; CHECK-NEXT: Successor(s): inner
; CHECK-EMPTY:
; CHECK-NEXT: inner:
-; CHECK-NEXT: WIDEN-PHI ir<%inner.iv> = phi [ ir<%inner.iv.next>, inner ], [ ir<0>, outer.header ]
+; CHECK-NEXT: EMIT-SCALAR ir<%inner.iv> = phi [ ir<%inner.iv.next>, inner ], [ ir<0>, outer.header ]
; CHECK-NEXT: EMIT ir<%gep.2> = getelementptr ir<@arr>, ir<0>, ir<%inner.iv>, ir<%outer.iv>
; CHECK-NEXT: EMIT store ir<%add>, ir<%gep.2>
; CHECK-NEXT: EMIT ir<%inner.iv.next> = add ir<%inner.iv>, ir<1>
diff --git a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
index 627dca5..47c4358 100644
--- a/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
+++ b/llvm/test/Transforms/MemCpyOpt/capturing-func.ll
@@ -67,28 +67,6 @@ define void @test_lifetime_end() {
ret void
}
-; Lifetime of %ptr2 does not end, because of size mismatch.
-define void @test_lifetime_not_end() {
-; CHECK-LABEL: define {{[^@]+}}@test_lifetime_not_end() {
-; CHECK-NEXT: [[PTR1:%.*]] = alloca i8, align 1
-; CHECK-NEXT: [[PTR2:%.*]] = alloca i8, align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 1, ptr [[PTR2]])
-; CHECK-NEXT: call void @foo(ptr [[PTR2]])
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr [[PTR1]], ptr [[PTR2]], i32 1, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 0, ptr [[PTR2]])
-; CHECK-NEXT: call void @foo(ptr [[PTR1]])
-; CHECK-NEXT: ret void
-;
- %ptr1 = alloca i8
- %ptr2 = alloca i8
- call void @llvm.lifetime.start.p0(i64 1, ptr %ptr2)
- call void @foo(ptr %ptr2)
- call void @llvm.memcpy.p0.p0.i32(ptr %ptr1, ptr %ptr2, i32 1, i1 false)
- call void @llvm.lifetime.end.p0(i64 0, ptr %ptr2)
- call void @foo(ptr %ptr1)
- ret void
-}
-
; Lifetime of %ptr2 ends before any potential use of the capture because we
; return from the function.
define void @test_function_end() {
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
index 816e103..84253dc 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-undef.ll
@@ -39,20 +39,6 @@ define void @test2(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uw
}
; Check that the memcpy is not removed.
-define void @test3(ptr sret(i8) noalias nocapture %out) nounwind noinline ssp uwtable {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[IN:%.*]] = alloca i64, align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IN]])
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[OUT:%.*]], ptr [[IN]], i64 8, i1 false)
-; CHECK-NEXT: ret void
-;
- %in = alloca i64
- call void @llvm.lifetime.start.p0(i64 4, ptr %in)
- call void @llvm.memcpy.p0.p0.i64(ptr %out, ptr %in, i64 8, i1 false)
- ret void
-}
-
-; Check that the memcpy is not removed.
define void @test_lifetime_may_alias(ptr %src, ptr %dst) {
; CHECK-LABEL: @test_lifetime_may_alias(
; CHECK-NEXT: [[LIFETIME:%.*]] = alloca i64, align 8
@@ -96,38 +82,6 @@ define void @test_lifetime_partial_alias_2(ptr noalias %dst) {
ret void
}
-; lifetime.start on part of alloca, copy in range.
-define void @test_lifetime_partial_alias_3(ptr noalias %dst) {
-; CHECK-LABEL: @test_lifetime_partial_alias_3(
-; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[A]])
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[GEP]], i64 4, i1 false)
-; CHECK-NEXT: ret void
-;
- %a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 12, ptr %a)
- %gep = getelementptr i8, ptr %a, i64 8
- call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 4, i1 false)
- ret void
-}
-
-; lifetime.start on part of alloca, copy out of range.
-define void @test_lifetime_partial_alias_4(ptr noalias %dst) {
-; CHECK-LABEL: @test_lifetime_partial_alias_4(
-; CHECK-NEXT: [[A:%.*]] = alloca [16 x i8], align 1
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[A]])
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 8
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[DST:%.*]], ptr [[GEP]], i64 8, i1 false)
-; CHECK-NEXT: ret void
-;
- %a = alloca [16 x i8]
- call void @llvm.lifetime.start.p0(i64 12, ptr %a)
- %gep = getelementptr i8, ptr %a, i64 8
- call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %gep, i64 8, i1 false)
- ret void
-}
-
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
diff --git a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
index 7ea63bb..343f951 100644
--- a/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memset-memcpy-oversized.ll
@@ -37,26 +37,6 @@ define void @test_alloca_with_lifetimes(ptr %result) {
ret void
}
-; memcpy size is larger than lifetime, don't optimize.
-define void @test_copy_larger_than_lifetime_size(ptr %result) {
-; CHECK-LABEL: @test_copy_larger_than_lifetime_size(
-; CHECK-NEXT: [[A:%.*]] = alloca [[T:%.*]], align 8
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[A]])
-; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[A]], i8 0, i64 12, i1 false)
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr [[RESULT:%.*]], ptr align 8 [[A]], i64 16, i1 false)
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[A]])
-; CHECK-NEXT: call void @free(ptr [[A]])
-; CHECK-NEXT: ret void
-;
- %a = alloca %T, align 8
- call void @llvm.lifetime.start.p0(i64 12, ptr %a)
- call void @llvm.memset.p0.i64(ptr align 8 %a, i8 0, i64 12, i1 false)
- call void @llvm.memcpy.p0.p0.i64(ptr %result, ptr align 8 %a, i64 16, i1 false)
- call void @llvm.lifetime.end.p0(i64 12, ptr %a)
- call void @free(ptr %a)
- ret void
-}
-
; The trailing bytes are not known to be undef, we can't ignore them.
define void @test_not_undef_memory(ptr %result, ptr %input) {
; CHECK-LABEL: @test_not_undef_memory(
diff --git a/llvm/test/Transforms/PGOProfile/profcheck-select.ll b/llvm/test/Transforms/PGOProfile/profcheck-select.ll
new file mode 100644
index 0000000..b5dc97d
--- /dev/null
+++ b/llvm/test/Transforms/PGOProfile/profcheck-select.ll
@@ -0,0 +1,63 @@
+; RUN: split-file %s %t
+
+; RUN: opt -passes=prof-inject %t/inject.ll -S -o - | FileCheck %t/inject.ll
+
+; RUN: opt -passes=prof-inject %t/inject-some.ll \
+; RUN: -profcheck-default-select-true-weight=1 -profcheck-default-select-false-weight=6 \
+; RUN: -S -o - | FileCheck %t/inject-some.ll
+
+; RUN: opt -passes=prof-verify %t/verify.ll 2>&1 | FileCheck %t/verify.ll
+
+; RUN: not opt -passes=prof-verify %t/verify-missing.ll 2>&1 | FileCheck %t/verify-missing.ll
+
+; Verify that we can disable it; it's sufficient that opt does not fail.
+; RUN: opt -passes=prof-verify -profcheck-annotate-select=0 %t/verify-missing.ll
+
+;--- inject.ll
+declare void @foo(i32 %a);
+define void @bar(i1 %c) {
+ %v = select i1 %c, i32 1, i32 2
+ call void @foo(i32 %v)
+ ret void
+}
+; CHECK-LABEL: @bar
+; CHECK: %v = select i1 %c, i32 1, i32 2, !prof !1
+; CHECK: !0 = !{!"function_entry_count", i64 1000}
+; CHECK: !1 = !{!"branch_weights", i32 2, i32 3}
+
+;--- inject-some.ll
+declare void @foo(i32 %a);
+define void @bar(i1 %c) {
+ %e = select i1 %c, i32 1, i32 2, !prof !0
+ %c2 = icmp eq i32 %e, 2
+ %v = select i1 %c2, i32 5, i32 10
+ call void @foo(i32 %v)
+ ret void
+}
+!0 = !{!"branch_weights", i32 2, i32 3}
+; CHECK-LABEL: @bar
+; CHECK: %v = select i1 %c2, i32 5, i32 10, !prof !2
+; CHECK: !0 = !{!"function_entry_count", i64 1000}
+; CHECK: !1 = !{!"branch_weights", i32 2, i32 3}
+; CHECK: !2 = !{!"branch_weights", i32 1, i32 6}
+
+;--- verify.ll
+declare void @foo(i32 %a);
+define void @bar(i1 %c) !prof !0 {
+ %v = select i1 %c, i32 1, i32 2, !prof !1
+ call void @foo(i32 %v)
+ ret void
+}
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 1, i32 7}
+; CHECK-NOT: Profile verification failed: select annotation missing
+
+;--- verify-missing.ll
+declare void @foo(i32 %a);
+define void @bar(i1 %c) !prof !0 {
+ %v = select i1 %c, i32 1, i32 2
+ call void @foo(i32 %v)
+ ret void
+}
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: Profile verification failed: select annotation missing
\ No newline at end of file
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
new file mode 100644
index 0000000..645dbc4
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
@@ -0,0 +1,741 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s
+
+define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ ; NOTE: value from %load1 is stored in %gep_s0
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+
+define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 2, i32 0, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %stride0 = mul nsw i64 %stride, 0
+ %stride1 = mul nsw i64 %stride, 1
+ %stride2 = mul nsw i64 %stride, 2
+ %stride3 = mul nsw i64 %stride, 3
+ %stride4 = mul nsw i64 %stride, 4
+ %stride5 = mul nsw i64 %stride, 5
+ %stride6 = mul nsw i64 %stride, 6
+ %stride7 = mul nsw i64 %stride, 7
+ %stride8 = mul nsw i64 %stride, 8
+ %stride9 = mul nsw i64 %stride, 9
+ %stride10 = mul nsw i64 %stride, 10
+ %stride11 = mul nsw i64 %stride, 11
+ %stride12 = mul nsw i64 %stride, 12
+ %stride13 = mul nsw i64 %stride, 13
+ %stride14 = mul nsw i64 %stride, 14
+ %stride15 = mul nsw i64 %stride, 15
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %stride0 = mul nsw i64 %stride, 0
+ %stride1 = mul nsw i64 %stride, 1
+ %stride2 = mul nsw i64 %stride, 2
+ %stride3 = mul nsw i64 %stride, 3
+ %stride4 = mul nsw i64 %stride, 4
+ %stride5 = mul nsw i64 %stride, 5
+ %stride6 = mul nsw i64 %stride, 6
+ %stride7 = mul nsw i64 %stride, 7
+ %stride8 = mul nsw i64 %stride, 8
+ %stride9 = mul nsw i64 %stride, 9
+ %stride10 = mul nsw i64 %stride, 10
+ %stride11 = mul nsw i64 %stride, 11
+ %stride12 = mul nsw i64 %stride, 12
+ %stride13 = mul nsw i64 %stride, 13
+ %stride14 = mul nsw i64 %stride, 14
+ %stride15 = mul nsw i64 %stride, 15
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+; TODO: We want to generate this code:
+; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
+define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 17
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 19
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 25
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+; TODO: We want to generate this code:
+; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %offset0 = mul nsw i64 %stride, 0
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
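+; Here group k (k = 0..3) starts at k * %stride and covers offsets +0..+3
+; from that base, so a single vp.strided.load with the runtime stride
+; %stride would replace all sixteen scalar loads.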
+define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OFFSET0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[OFFSET4:%.*]] = mul nsw i64 [[STRIDE]], 1
+; CHECK-NEXT: [[OFFSET8:%.*]] = mul nsw i64 [[STRIDE]], 2
+; CHECK-NEXT: [[OFFSET12:%.*]] = mul nsw i64 [[STRIDE]], 3
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET0]]
+; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET4]]
+; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]]
+; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
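+; For now each 4-byte group is loaded as a separate <4 x i8> vector and the
+; four vectors are recombined with shufflevectors before the 16-byte store.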
+ %offset0 = mul nsw i64 %stride, 0
+ %offset1 = add nsw i64 %offset0, 1
+ %offset2 = add nsw i64 %offset0, 2
+ %offset3 = add nsw i64 %offset0, 3
+ %offset4 = mul nsw i64 %stride, 1
+ %offset5 = add nsw i64 %offset4, 1
+ %offset6 = add nsw i64 %offset4, 2
+ %offset7 = add nsw i64 %offset4, 3
+ %offset8 = mul nsw i64 %stride, 2
+ %offset9 = add nsw i64 %offset8, 1
+ %offset10 = add nsw i64 %offset8, 2
+ %offset11 = add nsw i64 %offset8, 3
+ %offset12 = mul nsw i64 %stride, 3
+ %offset13 = add nsw i64 %offset12, 1
+ %offset14 = add nsw i64 %offset12, 2
+ %offset15 = add nsw i64 %offset12, 3
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %offset1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %offset2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %offset3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %offset4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %offset5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %offset6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %offset7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %offset8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %offset9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %offset10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %offset11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %offset12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %offset13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
diff --git a/llvm/test/tools/llvm-objdump/MachO/bad-trie.test b/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
index 8b29d30..e4d0ed5 100644
--- a/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
+++ b/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
@@ -11,7 +11,7 @@ RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-export-info-si
EXPORT_INFO_SIZE_TOO_BIG: macho-trie-export-info-size-too-big': truncated or malformed object (export info size: 0x1234 in export trie data at node: 0x33 too big and extends past end of trie data)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-children-count-byte 2>&1 | FileCheck --check-prefix CHILDREN_COUNT_BYTE %s
-CHILDREN_COUNT_BYTE: macho-trie-children-count-byte': truncated or malformed object (byte for count of childern in export trie data at node: 0x5 extends past end of trie data)
+CHILDREN_COUNT_BYTE: macho-trie-children-count-byte': truncated or malformed object (byte for count of children in export trie data at node: 0x5 extends past end of trie data)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-import-name-start 2>&1 | FileCheck --check-prefix IMPORT_NAME_START %s
IMPORT_NAME_START: macho-trie-import-name-start': truncated or malformed object (import name of re-export in export trie data at node: 0x33 starts past end of trie data)
@@ -25,8 +25,8 @@ EDGE_STRING_END: macho-trie-edge-string-end': truncated or malformed object (edg
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-not-export-node 2>&1 | FileCheck --check-prefix NOT_EXPORT_NODE %s
NOT_EXPORT_NODE: macho-trie-not-export-node': truncated or malformed object (node is not an export node in export trie data at node: 0x5a)
-RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-node-loop 2>&1 | FileCheck --check-prefix LOOP_OF_CHILDERN %s
-LOOP_OF_CHILDERN: macho-trie-node-loop': truncated or malformed object (loop in childern in export trie data at node: 0x42 back to node: 0x5)
+RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-node-loop 2>&1 | FileCheck --check-prefix LOOP_OF_CHILDREN %s
+LOOP_OF_CHILDREN: macho-trie-node-loop': truncated or malformed object (loop in children in export trie data at node: 0x42 back to node: 0x5)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-bad-library-ordinal 2>&1 | FileCheck --check-prefix BAD_LIBRARY_ORDINAL %s
BAD_LIBRARY_ORDINAL: macho-trie-bad-library-ordinal': truncated or malformed object (bad library ordinal: 69 (max 3) in export trie data at node: 0x33)