Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir | 206
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir | 136
-rw-r--r-- llvm/test/CodeGen/AArch64/cbz_wzr.mir | 260
-rw-r--r-- llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir | 8
-rw-r--r-- llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir | 4
-rw-r--r-- llvm/test/CodeGen/AArch64/framelayout-split-sve.mir | 587
-rw-r--r-- llvm/test/CodeGen/AArch64/framelayout-sve.mir | 12
-rw-r--r-- llvm/test/CodeGen/AArch64/freeze.ll | 6
-rw-r--r-- llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll | 20
-rw-r--r-- llvm/test/CodeGen/AArch64/llvm.modf.ll | 459
-rw-r--r-- llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll | 86
-rw-r--r-- llvm/test/CodeGen/AArch64/pr161420.ll | 54
-rw-r--r-- llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll | 21
-rw-r--r-- llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll | 23
-rw-r--r-- llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir | 16
-rw-r--r-- llvm/test/CodeGen/AArch64/spillfill-sve.mir | 10
-rw-r--r-- llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll | 824
-rw-r--r-- llvm/test/CodeGen/AArch64/stack-hazard.ll | 876
-rw-r--r-- llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll | 10
-rw-r--r-- llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll | 2854
-rw-r--r-- llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll | 2
-rw-r--r-- llvm/test/CodeGen/AArch64/tbz-tbnz.ll | 324
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll | 104
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll | 104
-rw-r--r-- llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll | 518
-rw-r--r-- llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 1
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll | 1641
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 131
-rw-r--r-- llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll | 96
-rw-r--r-- llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll | 94
-rw-r--r-- llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll | 1689
-rw-r--r-- llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll | 1580
-rw-r--r-- llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll | 1580
-rw-r--r-- llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll | 326
-rw-r--r-- llvm/test/CodeGen/AMDGPU/div_v2i128.ll | 126
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptoi.i128.ll | 26
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll | 76
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll | 47
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll | 47
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll | 76
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll | 721
-rw-r--r-- llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll | 331
-rw-r--r-- llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll | 8
-rw-r--r-- llvm/test/CodeGen/AMDGPU/limit-coalesce.mir | 33
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 60
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll | 92
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll | 3
-rw-r--r-- llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll | 16
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll | 89
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll | 16
-rw-r--r-- llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll | 22
-rw-r--r-- llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 2
-rw-r--r-- llvm/test/CodeGen/AMDGPU/memmove-var-size.ll | 16
-rw-r--r-- llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll | 75
-rw-r--r-- llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir | 148
-rw-r--r-- llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll | 14
-rw-r--r-- llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll | 7
-rw-r--r-- llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll | 94
-rw-r--r-- llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll | 6
-rw-r--r-- llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll | 27
-rw-r--r-- llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll | 4
-rw-r--r-- llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll | 29
-rw-r--r-- llvm/test/CodeGen/ARM/inline-asm-clobber.ll | 7
-rw-r--r-- llvm/test/CodeGen/ARM/llrint-conv.ll | 69
-rw-r--r-- llvm/test/CodeGen/ARM/llvm.exp10.ll | 16
-rw-r--r-- llvm/test/CodeGen/ARM/llvm.frexp.ll | 36
-rw-r--r-- llvm/test/CodeGen/ARM/lrint-conv.ll | 37
-rw-r--r-- llvm/test/CodeGen/ARM/vector-lrint.ll | 20
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll | 19
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll | 42
-rw-r--r-- llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll | 2
-rw-r--r-- llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll | 4
-rw-r--r-- llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll | 23
-rw-r--r-- llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll | 1
-rw-r--r-- llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll | 145
-rw-r--r-- llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll | 20
-rw-r--r-- llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll | 16
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll | 40
-rw-r--r-- llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir | 7
-rw-r--r-- llvm/test/CodeGen/RISCV/cmov-branch-opt.ll | 109
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/expandload.ll | 10
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll | 114
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll | 72
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll | 80
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/remat.ll | 132
-rw-r--r-- llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll | 36
-rw-r--r-- llvm/test/CodeGen/RISCV/select-bare.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/select-cc.ll | 59
-rw-r--r-- llvm/test/CodeGen/RISCV/select-cond.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/select-const.ll | 137
-rw-r--r-- llvm/test/CodeGen/RISCV/select.ll | 322
-rw-r--r-- llvm/test/CodeGen/RISCV/xqcicli.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/xqcicm.ll | 2
-rw-r--r-- llvm/test/CodeGen/RISCV/xqcics.ll | 126
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll | 53
-rw-r--r-- llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll | 19
-rw-r--r-- llvm/test/CodeGen/SystemZ/fp-cmp-04.ll | 4
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll | 41
-rw-r--r-- llvm/test/CodeGen/Thumb2/mve-vld3.ll | 414
-rw-r--r-- llvm/test/CodeGen/VE/Vector/vec_divrem.ll | 56
-rw-r--r-- llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll | 78
-rw-r--r-- llvm/test/CodeGen/X86/combine-pack.ll | 49
-rw-r--r-- llvm/test/CodeGen/X86/fshl.ll | 81
-rw-r--r-- llvm/test/CodeGen/X86/fshr.ll | 90
-rw-r--r-- llvm/test/CodeGen/X86/sbb.ll | 29
-rw-r--r-- llvm/test/CodeGen/X86/shift-i128.ll | 3
125 files changed, 12744 insertions, 6815 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir
new file mode 100644
index 0000000..36ac7eb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-modf.mir
@@ -0,0 +1,206 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=aarch64 -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: test_modf_f16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD]](s32)
+ ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: $h0 = COPY [[FPTRUNC1]](s16)
+ ; CHECK-NEXT: $h1 = COPY [[FPTRUNC]](s16)
+ ; CHECK-NEXT: RET_ReallyLR implicit $h0, implicit $h1
+ %0:_(s16) = COPY $h0
+ %1:_(s16), %2:_(s16) = G_FMODF %0
+ $h0 = COPY %1(s16)
+ $h1 = COPY %2(s16)
+ RET_ReallyLR implicit $h0, implicit $h1
+...
+---
+name: test_modf_f16_only_use_fractional_part
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_f16_only_use_fractional_part
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: $h0 = COPY [[FPTRUNC]](s16)
+ ; CHECK-NEXT: RET_ReallyLR implicit $h0
+ %0:_(s16) = COPY $h0
+ %1:_(s16), %2:_(s16) = G_FMODF %0
+ $h0 = COPY %1(s16)
+ RET_ReallyLR implicit $h0
+...
+---
+name: test_modf_v2f16
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v2f16
+ ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD]](s32)
+ ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[FPEXT1]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[LOAD1]](s32)
+ ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC2]](s16), [[DEF]](s16), [[DEF]](s16)
+ ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; CHECK-NEXT: $d1 = COPY [[BUILD_VECTOR1]](<4 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %1:_(<4 x s16>) = COPY $d0
+ %0:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %1(<4 x s16>)
+ %3:_(<2 x s16>), %4:_(<2 x s16>) = G_FMODF %0
+ %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %3(<2 x s16>)
+ %7:_(s16) = G_IMPLICIT_DEF
+ %8:_(<4 x s16>) = G_BUILD_VECTOR %5(s16), %6(s16), %7(s16), %7(s16)
+ %9:_(s16), %10:_(s16) = G_UNMERGE_VALUES %4(<2 x s16>)
+ %11:_(<4 x s16>) = G_BUILD_VECTOR %9(s16), %10(s16), %7(s16), %7(s16)
+ $d0 = COPY %8(<4 x s16>)
+ $d1 = COPY %11(<4 x s16>)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
+---
+name: test_modf_v3f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v3f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BITCAST]](<4 x s32>)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.2
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s32) from %stack.2)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s32) from %stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $s0 = COPY [[UV2]](s32)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX2]](p0)
+ ; CHECK-NEXT: BL &modff, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $x0, implicit-def $s0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load (s32) from %stack.0)
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32), [[LOAD2]](s32), [[DEF]](s32)
+ ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
+ ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %1:_(<2 x s64>) = COPY $q0
+ %2:_(<4 x s32>) = G_BITCAST %1(<2 x s64>)
+ %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %2(<4 x s32>)
+ %0:_(<3 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32), %5(s32)
+ %7:_(<3 x s32>), %8:_(<3 x s32>) = G_FMODF %0
+ %9:_(s32), %10:_(s32), %11:_(s32) = G_UNMERGE_VALUES %7(<3 x s32>)
+ %12:_(s32) = G_IMPLICIT_DEF
+ %13:_(<4 x s32>) = G_BUILD_VECTOR %9(s32), %10(s32), %11(s32), %12(s32)
+ %14:_(s32), %15:_(s32), %16:_(s32) = G_UNMERGE_VALUES %8(<3 x s32>)
+ %17:_(<4 x s32>) = G_BUILD_VECTOR %14(s32), %15(s32), %16(s32), %12(s32)
+ $q0 = COPY %13(<4 x s32>)
+ $q1 = COPY %17(<4 x s32>)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_v2f64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_v2f64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[UV]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $d0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s64) from %stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[UV1]](s64)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX1]](p0)
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY1]](s64), [[COPY2]](s64)
+ ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[LOAD]](s64), [[LOAD1]](s64)
+ ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; CHECK-NEXT: $q1 = COPY [[BUILD_VECTOR1]](<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:_(<2 x s64>) = COPY $q0
+ %1:_(<2 x s64>), %2:_(<2 x s64>) = G_FMODF %0
+ $q0 = COPY %1(<2 x s64>)
+ $q1 = COPY %2(<2 x s64>)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_fp128
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: test_modf_fp128
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $q0 = COPY [[COPY]](s128)
+ ; CHECK-NEXT: $x0 = COPY [[FRAME_INDEX]](p0)
+ ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s128) from %stack.0)
+ ; CHECK-NEXT: $q0 = COPY [[COPY1]](s128)
+ ; CHECK-NEXT: $q1 = COPY [[LOAD]](s128)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:_(s128) = COPY $q0
+ %1:_(s128), %2:_(s128) = G_FMODF %0
+ $q0 = COPY %1(s128)
+ $q1 = COPY %2(s128)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
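Note: G_FMODF is the generic-MIR form of the llvm.modf family of intrinsics, which split a floating-point value into its fractional and integral parts. As a rough sketch (function name hypothetical; the IR-level coverage lives in llvm/test/CodeGen/AArch64/llvm.modf.ll), the kind of IR that turns into the G_FMODF instructions legalized above is:

    define { float, float } @modf_f32(float %x) {
      %r = call { float, float } @llvm.modf.f32(float %x)
      ret { float, float } %r
    }

AArch64 has no modf instruction, so the legalizer lowers each scalar element to a libcall (&modff, &modf, or &modfl depending on width), passing a stack slot for the integral part and reloading it after the call, which is what the CHECK lines above verify.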
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index ba867f4..d721b73c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -508,6 +508,10 @@
# DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir
new file mode 100644
index 0000000..604cb96
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-modf.mir
@@ -0,0 +1,136 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -run-pass=instruction-select %s -o - | FileCheck %s
+---
+name: test_modf_fp128
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$q0' }
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, size: 16, alignment: 16 }
+body: |
+ bb.1:
+ liveins: $q0
+
+ ; CHECK-LABEL: name: test_modf_fp128
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $q0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui %stack.0, 0 :: (load (s128) from %stack.0)
+ ; CHECK-NEXT: $q0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $q1 = COPY [[LDRQui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %0:fpr(s128) = COPY $q0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $q0 = COPY %0(s128)
+ $x0 = COPY %3(p0)
+ BL &modfl, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $q0, implicit $x0, implicit-def $q0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s128) = COPY $q0
+ %2:fpr(s128) = G_LOAD %3(p0) :: (load (s128) from %stack.0)
+ $q0 = COPY %1(s128)
+ $q1 = COPY %2(s128)
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: test_modf_double
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$d0' }
+frameInfo:
+ maxAlignment: 8
+stack:
+ - { id: 0, size: 8, alignment: 8 }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $d0
+
+ ; CHECK-LABEL: name: test_modf_double
+ ; CHECK: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: $d0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $d1 = COPY [[LDRDui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %0:fpr(s64) = COPY $d0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $d0 = COPY %0(s64)
+ $x0 = COPY %3(p0)
+ BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s64) = COPY $d0
+ %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0)
+ $d0 = COPY %1(s64)
+ $d1 = COPY %2(s64)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
+---
+name: test_modf_double_vec
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+ - { reg: '$d0' }
+frameInfo:
+ maxAlignment: 8
+stack:
+ - { id: 0, size: 8, alignment: 8 }
+machineFunctionInfo: {}
+body: |
+ bb.1:
+ liveins: $d0
+
+ ; CHECK-LABEL: name: test_modf_double_vec
+ ; CHECK: liveins: $d0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0, 0
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: $d0 = COPY [[COPY]]
+ ; CHECK-NEXT: $x0 = COPY [[ADDXri]]
+ ; CHECK-NEXT: BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+ ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui %stack.0, 0 :: (load (s64) from %stack.0)
+ ; CHECK-NEXT: $d0 = COPY [[COPY1]]
+ ; CHECK-NEXT: $d1 = COPY [[LDRDui]]
+ ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
+ %0:fpr(s64) = COPY $d0
+ %3:gpr(p0) = G_FRAME_INDEX %stack.0
+ ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+ $d0 = COPY %0(s64)
+ $x0 = COPY %3(p0)
+ BL &modf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $x0, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+ %1:fpr(s64) = COPY $d0
+ %2:fpr(s64) = G_LOAD %3(p0) :: (load (s64) from %stack.0)
+ $d0 = COPY %1(s64)
+ $d1 = COPY %2(s64)
+ RET_ReallyLR implicit $d0, implicit $d1
+...
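Note: by the time instruction selection runs, the legalizer has already expanded G_FMODF into an explicit libcall, so these tests only need to check that the remaining generic operations select: G_FRAME_INDEX becomes an ADDXri of the stack slot, and the G_LOAD of the integral part becomes LDRDui/LDRQui. For reference, the standard C signatures these calls assume, written here as IR declarations:

    declare float @modff(float, ptr)
    declare double @modf(double, ptr)
    declare fp128 @modfl(fp128, ptr)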
diff --git a/llvm/test/CodeGen/AArch64/cbz_wzr.mir b/llvm/test/CodeGen/AArch64/cbz_wzr.mir
new file mode 100644
index 0000000..7deea56
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/cbz_wzr.mir
@@ -0,0 +1,260 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -o - %s -mtriple=aarch64-none-eabi -run-pass=machine-cp -mcp-use-is-copy-instr | FileCheck %s
+
+---
+name: cbz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBZW $wzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ CBZW killed renamable $w8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: cbnz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbnz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBNZW $wzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ CBNZW killed renamable $w8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBZW $wzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ TBZW killed renamable $w8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbnz_wzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbnz_wzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBNZW $wzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $w8 = ORRWrs $wzr, $wzr, 0
+ TBNZW killed renamable $w8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+
+---
+name: cbz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBZX $xzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ CBZX killed renamable $x8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: cbnz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: cbnz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CBNZX $xzr, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ CBNZX killed renamable $x8, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBZX $xzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ TBZX killed renamable $x8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
+---
+name: tbnz_xzr
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: tbnz_xzr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: TBNZX $xzr, 0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: $w0 = MOVZWi 10, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: $w0 = MOVZWi 20, 0
+ ; CHECK-NEXT: RET undef $lr, implicit $w0
+ bb.0:
+ liveins: $x0
+
+ $x8 = ORRXrs $xzr, $xzr, 0
+ TBNZX killed renamable $x8, 0, %bb.2
+
+ bb.1:
+ $w0 = MOVZWi 10, 0
+ RET undef $lr, implicit $w0
+
+ bb.2:
+ $w0 = MOVZWi 20, 0
+ RET undef $lr, implicit $w0
+...
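Note: ORRWrs $wzr, $wzr, 0 (and the ORRXrs form) is the canonical AArch64 zero move. With -mcp-use-is-copy-instr, machine-cp recognizes it as a copy from the zero register, so the killed use in each CBZ/CBNZ/TBZ/TBNZ variant is rewritten to read $wzr or $xzr directly and the dead move is erased. Schematically:

    $w8 = ORRWrs $wzr, $wzr, 0      ; mov w8, wzr
    CBZW killed renamable $w8, %bb.2
    ; becomes:
    CBZW $wzr, %bb.2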
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index aca2816..7fd0cee 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -164,10 +164,10 @@ stack:
- { id: 1, name: z1.addr, size: 16, alignment: 16, stack-id: scalable-vector,
debug-info-variable: '!31', debug-info-expression: '!DIExpression()',
debug-info-location: '!32' }
- - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!33', debug-info-expression: '!DIExpression()',
debug-info-location: '!34' }
- - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!35', debug-info-expression: '!DIExpression()',
debug-info-location: '!36' }
- { id: 4, name: w0.addr, size: 4, alignment: 4, local-offset: -4, debug-info-variable: '!37',
@@ -181,10 +181,10 @@ stack:
- { id: 7, name: localv1, size: 16, alignment: 16, stack-id: scalable-vector,
debug-info-variable: '!45', debug-info-expression: '!DIExpression()',
debug-info-location: '!46' }
- - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!48', debug-info-expression: '!DIExpression()',
debug-info-location: '!49' }
- - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-vector,
+ - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
debug-info-variable: '!51', debug-info-expression: '!DIExpression()',
debug-info-location: '!52' }
machineFunctionInfo: {}
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
index 0ea180b..41ba554 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
@@ -96,8 +96,8 @@ stack:
- { id: 1, size: 8, alignment: 8 }
- { id: 2, size: 16, alignment: 16, stack-id: scalable-vector }
- { id: 3, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 4, size: 2, alignment: 2, stack-id: scalable-vector }
- - { id: 5, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 4, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
+ - { id: 5, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
machineFunctionInfo: {}
body: |
bb.0.entry:
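Note: these two debug-info tests change only in that 2-byte predicate slots are now tagged with the dedicated scalable-predicate-vector stack ID rather than the generic scalable-vector one; this is presumably the bookkeeping that lets the frame lowering tested below (framelayout-split-sve.mir) place predicate (PPR) and vector (ZPR) objects in separate stack regions.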
diff --git a/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir
new file mode 100644
index 0000000..35eafe8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/framelayout-split-sve.mir
@@ -0,0 +1,587 @@
+# RUN: llc -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -mtriple=aarch64-none-linux-gnu -run-pass=prologepilog %s -o - | FileCheck %s
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -o - | FileCheck %s --check-prefix=ASM
+# RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -start-before=prologepilog %s -filetype=obj -o %t
+# RUN: llvm-objdump --dwarf=frames %t | FileCheck %s --check-prefix=UNWINDINFO
+# RUN: rm -rf %t
+#
+# Test allocation and deallocation of SVE objects on the stack with
+# split-sve-objects (and hazard padding) enabled. This also tests using a
+# combination of scalable and non-scalable offsets to access the SVE objects
+# on the stack.
+#
+# With split-sve-objects (which implies hazard padding) the SVE area is split
+# into PPR and ZPR areas with (fixed-size) hazard padding between them. The PPR
+# area holds all scalable predicate callee saves and locals, and the ZPR area
+# holds all scalable vector callee saves and locals. Additionally, any FPR
+# callee save is promoted to a ZPR callee save (to avoid needing additional
+# hazard padding in the callee save area).
+#
+# +-------------+
+# | stack arg |
+# +-------------+ <- SP before call
+# | Callee Saves|
+# | Frame record| (if available)
+# |-------------| <- FP (if available)
+# | PPR area |
+# |-------------|
+# |/////////////| hazard padding
+# |-------------|
+# | ZPR area |
+# +-------------+
+# | : |
+# | Stack objs |
+# | : |
+# +-------------+ <- SP after call and frame-setup
+#
+--- |
+
+ define void @test_allocate_split_sve() uwtable { entry: unreachable }
+ define void @test_allocate_split_sve_realigned() uwtable { entry: unreachable }
+ define void @test_address_split_sve() uwtable { entry: unreachable }
+ define void @test_address_split_sve_fp() uwtable { entry: unreachable }
+ define aarch64_sve_vector_pcs void @save_restore_ppr_zpr() uwtable { entry: unreachable }
+
+...
+---
+# +----------+
+# |scratchreg| // x29 is used as scratch reg.
+# |----------|
+# | %stack.1 | // scalable predicate of n * 12 bytes, aligned to 16 bytes
+# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes)
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# +----------+
+# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes,
+# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes)
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.2 | // not scalable
+# +----------+ <- SP
+
+# CHECK-LABEL: name: test_allocate_split_sve
+# CHECK: stackSize: 1056
+
+# CHECK: bb.0.entry:
+# CHECK: liveins: $z0, $p0, $fp
+# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.4)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22
+#
+# CHECK-NEXT: $x8 = ADDXri $sp, 1040, 0
+# CHECK-NEXT: $x8 = ADDPL_XXI $x8, 7, implicit $vg
+# CHECK-NEXT: STR_ZXI $z0, killed $x8, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+# CHECK-NEXT: $x8 = ADDXri $sp, 2064, 0
+# CHECK-NEXT: STR_PXI $p0, killed $x8, 18 :: (store (<vscale x 1 x s16>) into %stack.1)
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.4)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_allocate_split_sve:
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 1056
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_allocate_split_sve
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
+ - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 }
+ - { id: 2, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $p0
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1)
+ RET_ReallyLR
+...
+---
+
+# Stack realignment is not supported with split-sve-objects, so we fall back to
+# the default hazard padding implementation. This does not prevent hazards
+# between ZPRs and PPRs (TODO: support this case).
+#
+# +----------+
+# | lr, fp | // frame record
+# |----------|
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.1 | // scalable predicate of n * 12 bytes, aligned to 16 bytes
+# | | // to be materialized with 1*ADDVL (<=> n * 16 bytes)
+# +----------+
+# | %stack.0 | // scalable SVE object of n * 18 bytes, aligned to 16 bytes,
+# | | // to be materialized with 2*ADDVL (<=> 2 * n * 16 bytes)
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.2 | // not scalable
+# +----------+ <- SP
+
+name: test_allocate_split_sve_realigned
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 18, alignment: 2 }
+ - { id: 1, stack-id: scalable-vector, size: 12, alignment: 2 }
+ - { id: 2, stack-id: default, size: 16, alignment: 32 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $p0
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_PXI $p0, %stack.1, 0 :: (store (<vscale x 1 x s16>) into %stack.1)
+ RET_ReallyLR
+
+# CHECK-LABEL: name: test_allocate_split_sve_realigned
+# CHECK: stackSize: 2080
+
+# CHECK: bb.0.entry:
+# CHECK: liveins: $z0, $p0, $lr
+# CHECK: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup STRXui killed $lr, $sp, 129 :: (store (s64) into %stack.4)
+# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: $[[TMP]] = frame-setup ADDVL_XXI $[[TMP]], -2, implicit $vg
+# CHECK-NEXT: $sp = frame-setup ANDXri killed $x9, 7930
+#
+# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0
+# CHECK-NEXT: $x8 = ADDPL_XXI $x8, -1, implicit $vg
+# CHECK-NEXT: STR_ZXI $z0, killed $x8, -1 :: (store (<vscale x 1 x s128>) into %stack.0)
+# CHECK-NEXT: $x8 = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_PXI $p0, killed $x8, -15 :: (store (<vscale x 1 x s16>) into %stack.1)
+#
+# CHECK-NEXT: $sp = frame-destroy SUBXri $fp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1040
+# CHECK-NEXT: $lr = frame-destroy LDRXui $sp, 129 :: (load (s64) from %stack.4)
+# CHECK-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.5)
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_allocate_split_sve_realigned
+# ASM: sub sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: str x29, [sp, #1024]
+# ASM-NEXT: str x30, [sp, #1032]
+# ASM-NEXT: add x29, sp, #1024
+# ASM-NEXT: .cfi_def_cfa w29, 16
+# ASM-NEXT: .cfi_offset w30, -8
+# ASM-NEXT: .cfi_offset w29, -16
+#
+# ASM: sub sp, x29, #1024
+# ASM-NEXT: .cfi_def_cfa wsp, 1040
+# ASM-NEXT: ldr x30, [sp, #1032]
+# ASM-NEXT: ldr x29, [sp, #1024]
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w30
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa: reg29 +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+#
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1040
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg30
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+...
+---
+
+# +----------+
+# |scratchreg| // x29 is used as scratch reg.
+# +----------+
+# | %stack.2 | // scalable predicate @ SP + 2064b + 46 scalable bytes
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# |----------|
+# | %stack.0 | // scalable vector @ SP + 1040b + 16 scalable bytes
+# | %stack.1 | // scalable vector @ SP + 1040b
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.3 | // not scalable
+# +----------+ <- SP
+
+# CHECK-LABEL: name: test_address_split_sve
+# CHECK: stackSize: 1056
+
+# CHECK: bb.0.entry:
+# CHECK-NEXT: liveins:
+# CHECK-NEXT: {{ $}}
+# CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 1040
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22
+#
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0
+# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], 1
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 1040, 0
+# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], 0
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = ADDXri $sp, 2064, 0
+# CHECK-NEXT: STR_PXI $p0, killed $[[TMP]], 23
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 1056
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.5)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_address_split_sve
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_def_cfa_offset 1040
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x48, 0x1e, 0x22 // sp + 2080 + 24 * VG
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1056 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 1056
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +1040
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit24, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2080, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1056, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +1056
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_address_split_sve
+frameInfo:
+ maxAlignment: 16
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 }
+ - { id: 3, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $z1, $p0
+
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1)
+ STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2)
+
+ RET_ReallyLR
+...
+---
+# +----------+
+# | lr, fp | // frame record
+# +----------+ <- FP
+# | %stack.2 | // scalable predicate @ FP - 2 scalable bytes
+# |----------|
+# |//////////| // hazard padding (1024 bytes) -- part of PPR locals area
+# |//////////| // Note: This is currently not included in the "stackSize"
+# |----------|
+# | %stack.0 | // scalable vector @ FP - 1024b - 32 scalable bytes
+# | %stack.1 | // scalable vector @ FP - 1024b - 48 scalable bytes
+# +----------+
+# |//////////| // hazard padding (1024 bytes)
+# |----------|
+# | %stack.3 | // not scalable
+# +----------+ <- SP
+
+# CHECK-LABEL: name: test_address_split_sve_fp
+# CHECK: stackSize: 1056
+#
+# CHECK: bb.0.entry:
+# CHECK-NEXT: liveins:
+# CHECK-NEXT: {{ $}}
+# CHECK-NEXT: early-clobber $sp = frame-setup STPXpre killed $fp, killed $lr, $sp, -2 :: (store (s64) into %stack.6), (store (s64) into %stack.5)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: $fp = frame-setup ADDXri $sp, 0, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa $w29, 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w30, -8
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
+#
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_ZXI $z0, killed $[[TMP]], -2
+# CHECK-NEXT: $[[TMP:x[0-9]+]] = SUBXri $fp, 1024, 0
+# CHECK-NEXT: STR_ZXI $z1, killed $[[TMP]], -3
+# CHECK-NEXT: STR_PXI $p0, $fp, -1
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
+# CHECK-NEXT: early-clobber $sp, $fp, $lr = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.6), (load (s64) from %stack.5)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w30
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: test_address_split_sve_fp
+# ASM: stp x29, x30, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: mov x29, sp
+# ASM-NEXT: .cfi_def_cfa w29, 16
+# ASM-NEXT: .cfi_offset w30, -8
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: sub sp, sp, #1040
+# ASM-NEXT: addvl sp, sp, #-2
+#
+# ASM: addvl sp, sp, #2
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: add sp, sp, #1040
+# ASM-NEXT: .cfi_def_cfa wsp, 16
+# ASM-NEXT: ldp x29, x30, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w30
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO: DW_CFA_def_cfa: reg29 +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg30 -8
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+#
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg30
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
+
+name: test_address_split_sve_fp
+frameInfo:
+ maxAlignment: 16
+ isFrameAddressTaken: true
+stack:
+ - { id: 0, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 1, stack-id: scalable-vector, size: 16, alignment: 8 }
+ - { id: 2, stack-id: scalable-vector, size: 2, alignment: 2 }
+ - { id: 3, stack-id: default, size: 16, alignment: 8 }
+body: |
+ bb.0.entry:
+ liveins: $z0, $z1, $p0
+
+ STR_ZXI $z0, %stack.0, 0 :: (store (<vscale x 1 x s128>) into %stack.0)
+ STR_ZXI $z1, %stack.1, 0 :: (store (<vscale x 1 x s128>) into %stack.1)
+ STR_PXI $p0, %stack.2, 0 :: (store (<vscale x 1 x s16>) into %stack.2)
+
+ RET_ReallyLR
+...
+---
+# CHECK-LABEL: name: save_restore_ppr_zpr
+# CHECK: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.8)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $w29, -16
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -1, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: frame-setup STR_PXI killed $p6, $sp, 5 :: (store (s16) into %stack.7)
+# CHECK-NEXT: frame-setup STR_PXI killed $p5, $sp, 6 :: (store (s16) into %stack.6)
+# CHECK-NEXT: frame-setup STR_PXI killed $p4, $sp, 7 :: (store (s16) into %stack.5)
+#
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+#
+# CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -3, implicit $vg
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+# CHECK-NEXT: frame-setup STR_ZXI killed $z10, $sp, 0 :: (store (s128) into %stack.4)
+# CHECK-NEXT: frame-setup STR_ZXI killed $z9, $sp, 1 :: (store (s128) into %stack.3)
+# CHECK-NEXT: frame-setup STR_ZXI killed $z8, $sp, 2 :: (store (s128) into %stack.2)
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22
+# CHECK-NEXT: $sp = frame-setup SUBXri $sp, 1056, 0
+# CHECK-NEXT: frame-setup CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+#
+#
+# CHECK: $sp = frame-destroy ADDXri $sp, 1056, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.4)
+# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.3)
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2 :: (load (s128) from %stack.2)
+#
+# CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22
+#
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10
+# CHECK-NEXT: $p6 = frame-destroy LDR_PXI $sp, 5 :: (load (s16) from %stack.7)
+# CHECK-NEXT: $p5 = frame-destroy LDR_PXI $sp, 6 :: (load (s16) from %stack.6)
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7 :: (load (s16) from %stack.5)
+# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1, implicit $vg
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
+# CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.8)
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa_offset 0
+# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $w29
+# CHECK-NEXT: RET_ReallyLR
+
+# ASM-LABEL: save_restore_ppr_zpr:
+# ASM: str x29, [sp, #-16]!
+# ASM-NEXT: .cfi_def_cfa_offset 16
+# ASM-NEXT: .cfi_offset w29, -16
+# ASM-NEXT: addvl sp, sp, #-1
+# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+# ASM-NEXT: str p6, [sp, #5, mul vl]
+# ASM-NEXT: str p5, [sp, #6, mul vl]
+# ASM-NEXT: str p4, [sp, #7, mul vl]
+# ASM-NEXT: sub sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 1040 + 8 * VG
+# ASM-NEXT: addvl sp, sp, #-3
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG
+# ASM-NEXT: str z10, [sp]
+# ASM-NEXT: str z9, [sp, #1, mul vl]
+# ASM-NEXT: str z8, [sp, #2, mul vl]
+# ASM-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+# ASM-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 24 * VG - 1040
+# ASM-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 32 * VG - 1040
+# ASM-NEXT: sub sp, sp, #1056
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 2096 + 32 * VG
+#
+# ASM: add sp, sp, #1056
+# ASM-NEXT: .cfi_escape 0x0f, 0x0a, 0x8f, 0x90, 0x08, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 1040 + 32 * VG
+# ASM-NEXT: ldr z10, [sp]
+# ASM-NEXT: ldr z9, [sp, #1, mul vl]
+# ASM-NEXT: ldr z8, [sp, #2, mul vl]
+# ASM-NEXT: add sp, sp, #1024
+# ASM-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x11, 0x20, 0x1e, 0x22 // sp + 16 + 32 * VG
+# ASM-NEXT: addvl sp, sp, #3
+# ASM-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
+# ASM-NEXT: .cfi_restore z8
+# ASM-NEXT: .cfi_restore z9
+# ASM-NEXT: .cfi_restore z10
+# ASM-NEXT: ldr p6, [sp, #5, mul vl]
+# ASM-NEXT: ldr p5, [sp, #6, mul vl]
+# ASM-NEXT: ldr p4, [sp, #7, mul vl]
+# ASM-NEXT: addvl sp, sp, #1
+# ASM-NEXT: .cfi_def_cfa wsp, 16
+# ASM-NEXT: ldr x29, [sp], #16
+# ASM-NEXT: .cfi_def_cfa_offset 0
+# ASM-NEXT: .cfi_restore w29
+
+# UNWINDINFO: DW_CFA_def_cfa_offset: +16
+# UNWINDINFO-NEXT: DW_CFA_offset: reg29 -16
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg72 DW_OP_bregx 0x2e +0, DW_OP_consts -16, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg73 DW_OP_bregx 0x2e +0, DW_OP_consts -24, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_expression: reg74 DW_OP_bregx 0x2e +0, DW_OP_consts -32, DW_OP_mul, DW_OP_plus, DW_OP_consts -1040, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +2096, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+#
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +1040, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_consts +32, DW_OP_mul, DW_OP_plus
+# UNWINDINFO: DW_CFA_def_cfa_expression: DW_OP_breg31 +16, DW_OP_bregx 0x2e +0, DW_OP_lit8, DW_OP_mul, DW_OP_plus
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg104
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg105
+# UNWINDINFO-NEXT: DW_CFA_restore_extended: reg106
+# UNWINDINFO: DW_CFA_def_cfa: reg31 +16
+# UNWINDINFO: DW_CFA_def_cfa_offset: +0
+# UNWINDINFO-NEXT: DW_CFA_restore: reg29
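+#
+# Annotation (not generated output): each `escape` above is a raw DWARF
+# expression. As a worked example, the bytes
+#   0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22
+# decode as DW_CFA_def_cfa_expression (0x0f), length 8 (0x08), then:
+#   0x8f 0x10       -> DW_OP_breg31 +16    (push sp + 16)
+#   0x92 0x2e 0x00  -> DW_OP_bregx 0x2e +0 (push VG, DWARF reg 46)
+#   0x38            -> DW_OP_lit8          (push 8)
+#   0x1e, 0x22      -> DW_OP_mul, DW_OP_plus
+# giving CFA = sp + 16 + 8 * VG, which matches the first and last
+# DW_CFA_def_cfa_expression lines above.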
+
+name: save_restore_ppr_zpr
+stack:
+ - { id: 0, stack-id: default, size: 32, alignment: 16 }
+body: |
+ bb.0.entry:
+
+ $p4 = IMPLICIT_DEF
+ $p5 = IMPLICIT_DEF
+ $p6 = IMPLICIT_DEF
+ $z8 = IMPLICIT_DEF
+ $z9 = IMPLICIT_DEF
+ $z10 = IMPLICIT_DEF
+
+ RET_ReallyLR
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 03a6aab..1101416 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -1215,19 +1215,19 @@ body: |
# CHECK: - { id: 2, name: '', type: default, offset: -112, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector,
# CHECK: - { id: 3, name: '', type: default, offset: -114, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
# CHECK: - { id: 4, name: '', type: spill-slot, offset: -144, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector,
# CHECK: - { id: 5, name: '', type: spill-slot, offset: -146, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector,
+# CHECK-NEXT: stack-id: scalable-predicate-vector,
# CHECK: - { id: 6, name: '', type: spill-slot, offset: -16, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z8',
# CHECK: - { id: 7, name: '', type: spill-slot, offset: -32, size: 16, alignment: 16,
# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$z23',
# CHECK: - { id: 8, name: '', type: spill-slot, offset: -34, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p4',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p4',
# CHECK: - { id: 9, name: '', type: spill-slot, offset: -36, size: 2, alignment: 2,
-# CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: '$p15',
+# CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: '$p15',
# CHECK: - { id: 10, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16,
# CHECK-NEXT: stack-id: default, callee-saved-register: '$fp',
#
@@ -1295,9 +1295,9 @@ stack:
- { id: 0, type: default, size: 32, alignment: 16, stack-id: scalable-vector }
- { id: 1, type: default, size: 4, alignment: 2, stack-id: scalable-vector }
- { id: 2, type: default, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 3, type: default, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
- { id: 4, type: spill-slot, size: 16, alignment: 16, stack-id: scalable-vector }
- - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-vector }
+ - { id: 5, type: spill-slot, size: 2, alignment: 2, stack-id: scalable-predicate-vector }
body: |
bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/freeze.ll b/llvm/test/CodeGen/AArch64/freeze.ll
index fae3bbe..fb909fe 100644
--- a/llvm/test/CodeGen/AArch64/freeze.ll
+++ b/llvm/test/CodeGen/AArch64/freeze.ll
@@ -466,15 +466,12 @@ define <8 x i16> @freeze_urhadd(<8 x i16> %a0, <8 x i16> %a1) {
ret <8 x i16> %masked
}
-; TODO: Unnecessary sext_inreg
define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) {
; CHECK-LABEL: freeze_shadd:
; CHECK: // %bb.0:
; CHECK-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-NEXT: sshr v1.8h, v1.8h, #8
; CHECK-NEXT: shadd v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: shl v0.8h, v0.8h, #8
-; CHECK-NEXT: sshr v0.8h, v0.8h, #8
; CHECK-NEXT: ret
%x0 = sext <8 x i8> %a0 to <8 x i16>
%x1 = ashr <8 x i16> %a1, splat (i16 8)
@@ -485,15 +482,12 @@ define <8 x i16> @freeze_shadd(<8 x i8> %a0, <8 x i16> %a1) {
ret <8 x i16> %sext
}
-; TODO: Unnecessary sext_inreg
define <8 x i16> @freeze_srhadd(<8 x i8> %a0, <8 x i16> %a1) {
; CHECK-LABEL: freeze_srhadd:
; CHECK: // %bb.0:
; CHECK-NEXT: sshll v0.8h, v0.8b, #0
; CHECK-NEXT: sshr v1.8h, v1.8h, #8
; CHECK-NEXT: srhadd v0.8h, v0.8h, v1.8h
-; CHECK-NEXT: shl v0.8h, v0.8h, #8
-; CHECK-NEXT: sshr v0.8h, v0.8h, #8
; CHECK-NEXT: ret
%x0 = sext <8 x i8> %a0 to <8 x i16>
%x1 = ashr <8 x i16> %a1, splat (i16 8)
diff --git a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
index b89f551..e2c861b 100644
--- a/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
+++ b/llvm/test/CodeGen/AArch64/get-active-lane-mask-extract.ll
@@ -327,9 +327,6 @@ define void @test_2x8bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) {
; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_ptest:
; CHECK-SVE2p1-SME2: // %bb.0: // %entry
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.h, p1.h }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: ptrue p2.b
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p3.b, p0.b, p1.b
-; CHECK-SVE2p1-SME2-NEXT: ptest p2, p3.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB11_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
; CHECK-SVE2p1-SME2-NEXT: b use
@@ -368,9 +365,6 @@ define void @test_2x8bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n
; CHECK-SVE2p1-SME2-LABEL: test_2x8bit_mask_with_extracts_and_reinterpret_casts:
; CHECK-SVE2p1-SME2: // %bb.0: // %entry
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: ptrue p2.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p3.h, p0.h, p1.h
-; CHECK-SVE2p1-SME2-NEXT: ptest p2, p3.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB12_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
; CHECK-SVE2p1-SME2-NEXT: b use
@@ -413,14 +407,9 @@ define void @test_4x4bit_mask_with_extracts_and_ptest(i64 %i, i64 %n) {
; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8
; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.s, p1.s }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.s, p3.s }, x8, x1
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.h, p0.h, p1.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p5.h, p2.h, p3.h
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.b, p4.b, p5.b
-; CHECK-SVE2p1-SME2-NEXT: ptrue p5.b
-; CHECK-SVE2p1-SME2-NEXT: ptest p5, p4.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB13_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
+; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.s, p3.s }, x8, x1
; CHECK-SVE2p1-SME2-NEXT: b use
; CHECK-SVE2p1-SME2-NEXT: .LBB13_2: // %if.end
; CHECK-SVE2p1-SME2-NEXT: ret
@@ -463,14 +452,9 @@ define void @test_4x2bit_mask_with_extracts_and_reinterpret_casts(i64 %i, i64 %n
; CHECK-SVE2p1-SME2-NEXT: adds x8, x0, x8
; CHECK-SVE2p1-SME2-NEXT: csinv x8, x8, xzr, lo
; CHECK-SVE2p1-SME2-NEXT: whilelo { p0.d, p1.d }, x0, x1
-; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.d, p3.d }, x8, x1
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.s, p0.s, p1.s
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p5.s, p2.s, p3.s
-; CHECK-SVE2p1-SME2-NEXT: uzp1 p4.h, p4.h, p5.h
-; CHECK-SVE2p1-SME2-NEXT: ptrue p5.h
-; CHECK-SVE2p1-SME2-NEXT: ptest p5, p4.b
; CHECK-SVE2p1-SME2-NEXT: b.pl .LBB14_2
; CHECK-SVE2p1-SME2-NEXT: // %bb.1: // %if.then
+; CHECK-SVE2p1-SME2-NEXT: whilelo { p2.d, p3.d }, x8, x1
; CHECK-SVE2p1-SME2-NEXT: b use
; CHECK-SVE2p1-SME2-NEXT: .LBB14_2: // %if.end
; CHECK-SVE2p1-SME2-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/llvm.modf.ll b/llvm/test/CodeGen/AArch64/llvm.modf.ll
index 41fe796..503742f 100644
--- a/llvm/test/CodeGen/AArch64/llvm.modf.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.modf.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc -mtriple=aarch64-gnu-linux -global-isel < %s | FileCheck -check-prefixes=CHECK,CHECK-GI %s
define { half, half } @test_modf_f16(half %a) {
; CHECK-LABEL: test_modf_f16:
@@ -55,61 +56,95 @@ define half @test_modf_f16_only_use_integral_part(half %a) {
}
define { <2 x half>, <2 x half> } @test_modf_v2f16(<2 x half> %a) {
-; CHECK-LABEL: test_modf_v2f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h1, v0.h[1]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: fcvt s1, h1
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h2, s0
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: mov h1, v1.h[2]
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov v2.h[1], v1.h[0]
-; CHECK-NEXT: str q2, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: fcvt h2, s0
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: mov h1, v1.h[3]
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov v1.h[2], v2.h[0]
-; CHECK-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldp s2, s1, [sp, #40]
-; CHECK-NEXT: fcvt h4, s0
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: fcvt h3, s1
-; CHECK-NEXT: fcvt h1, s2
-; CHECK-NEXT: ldr s2, [sp, #56]
-; CHECK-NEXT: mov v0.h[3], v4.h[0]
-; CHECK-NEXT: fcvt h2, s2
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: mov v1.h[1], v3.h[0]
-; CHECK-NEXT: ldr s3, [sp, #60]
-; CHECK-NEXT: mov v1.h[2], v2.h[0]
-; CHECK-NEXT: fcvt h2, s3
-; CHECK-NEXT: mov v1.h[3], v2.h[0]
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: add sp, sp, #64
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v2f16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: mov h1, v0.h[1]
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h0, s0
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: fcvt s1, h1
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: fmov s0, s1
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h2, s0
+; CHECK-SD-NEXT: add x0, sp, #56
+; CHECK-SD-NEXT: mov h1, v1.h[2]
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v2.h[1], v1.h[0]
+; CHECK-SD-NEXT: str q2, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h2, s0
+; CHECK-SD-NEXT: add x0, sp, #60
+; CHECK-SD-NEXT: mov h1, v1.h[3]
+; CHECK-SD-NEXT: fcvt s0, h1
+; CHECK-SD-NEXT: ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v1.h[2], v2.h[0]
+; CHECK-SD-NEXT: str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldp s2, s1, [sp, #40]
+; CHECK-SD-NEXT: fcvt h4, s0
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-SD-NEXT: fcvt h3, s1
+; CHECK-SD-NEXT: fcvt h1, s2
+; CHECK-SD-NEXT: ldr s2, [sp, #56]
+; CHECK-SD-NEXT: mov v0.h[3], v4.h[0]
+; CHECK-SD-NEXT: fcvt h2, s2
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: mov v1.h[1], v3.h[0]
+; CHECK-SD-NEXT: ldr s3, [sp, #60]
+; CHECK-SD-NEXT: mov v1.h[2], v2.h[0]
+; CHECK-SD-NEXT: fcvt h2, s3
+; CHECK-SD-NEXT: mov v1.h[3], v2.h[0]
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: str x30, [sp, #56] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w30, -8
+; CHECK-GI-NEXT: .cfi_offset b8, -16
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov h8, v0.h[1]
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: fcvt s0, h0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: fcvt h0, s0
+; CHECK-GI-NEXT: ldr s1, [sp, #40]
+; CHECK-GI-NEXT: add x0, sp, #44
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fcvt h0, s1
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fcvt s0, h8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldr s1, [sp, #44]
+; CHECK-GI-NEXT: fcvt h3, s0
+; CHECK-GI-NEXT: ldr x30, [sp, #56] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: fcvt h2, s1
+; CHECK-GI-NEXT: ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.h[1], v3.h[0]
+; CHECK-GI-NEXT: mov v1.h[1], v2.h[0]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: add sp, sp, #64
+; CHECK-GI-NEXT: ret
%result = call { <2 x half>, <2 x half> } @llvm.modf.v2f16(<2 x half> %a)
ret { <2 x half>, <2 x half> } %result
}
@@ -130,80 +165,156 @@ define { float, float } @test_modf_f32(float %a) {
}
define { <3 x float>, <3 x float> } @test_modf_v3f32(<3 x float> %a) {
-; CHECK-LABEL: test_modf_v3f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: add x19, sp, #56
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: add x20, sp, #60
-; CHECK-NEXT: mov v0.s[1], v1.s[0]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov s0, v0.s[2]
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr s1, [sp, #44]
-; CHECK-NEXT: ldr q2, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v1.s }[1], [x19]
-; CHECK-NEXT: mov v2.s[2], v0.s[0]
-; CHECK-NEXT: ld1 { v1.s }[2], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.16b, v2.16b
-; CHECK-NEXT: add sp, sp, #80
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v3f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #80
+; CHECK-SD-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 80
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: mov s0, v0.s[1]
+; CHECK-SD-NEXT: add x0, sp, #56
+; CHECK-SD-NEXT: add x19, sp, #56
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: add x0, sp, #60
+; CHECK-SD-NEXT: add x20, sp, #60
+; CHECK-SD-NEXT: mov v0.s[1], v1.s[0]
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov s0, v0.s[2]
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr s1, [sp, #44]
+; CHECK-SD-NEXT: ldr q2, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-SD-NEXT: mov v2.s[2], v0.s[0]
+; CHECK-SD-NEXT: ld1 { v1.s }[2], [x20]
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.16b, v2.16b
+; CHECK-SD-NEXT: add sp, sp, #80
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v3f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp d9, d8, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -24
+; CHECK-GI-NEXT: .cfi_offset b9, -32
+; CHECK-GI-NEXT: add x0, sp, #68
+; CHECK-GI-NEXT: mov s8, v0.s[1]
+; CHECK-GI-NEXT: mov s9, v0.s[2]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldr s1, [sp, #68]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: add x0, sp, #72
+; CHECK-GI-NEXT: stp q0, q1, [sp, #32] // 32-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #76
+; CHECK-GI-NEXT: add x19, sp, #76
+; CHECK-GI-NEXT: ldr s0, [sp, #72]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s9
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldp q3, q2, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: ldr q1, [sp] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.s[1], v1.s[0]
+; CHECK-GI-NEXT: ldr q1, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v1.s[1], v3.s[0]
+; CHECK-GI-NEXT: mov v2.s[2], v0.s[0]
+; CHECK-GI-NEXT: ld1 { v1.s }[2], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: add sp, sp, #112
+; CHECK-GI-NEXT: ret
%result = call { <3 x float>, <3 x float> } @llvm.modf.v3f32(<3 x float> %a)
ret { <3 x float>, <3 x float> } %result
}
define { <2 x float>, <2 x float> } @test_modf_v2f32(<2 x float> %a) {
-; CHECK-LABEL: test_modf_v2f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: add x19, sp, #40
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl modff
-; CHECK-NEXT: ldr s1, [sp, #44]
-; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
-; CHECK-NEXT: ld1 { v1.s }[1], [x19]
-; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.s[1], v2.s[0]
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: add sp, sp, #64
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_modf_v2f32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: add x19, sp, #40
+; CHECK-SD-NEXT: mov s0, v0.s[1]
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #44
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-SD-NEXT: bl modff
+; CHECK-SD-NEXT: ldr s1, [sp, #44]
+; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-SD-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.s[1], v2.s[0]
+; CHECK-SD-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #64
+; CHECK-GI-NEXT: str d8, [sp, #32] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -32
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: mov s8, v0.s[1]
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #44
+; CHECK-GI-NEXT: add x19, sp, #44
+; CHECK-GI-NEXT: ldr s0, [sp, #40]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov s0, s8
+; CHECK-GI-NEXT: bl modff
+; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $s0 killed $s0 def $q0
+; CHECK-GI-NEXT: ldr d8, [sp, #32] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.s[1], v0.s[0]
+; CHECK-GI-NEXT: ld1 { v1.s }[1], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-GI-NEXT: fmov d0, d2
+; CHECK-GI-NEXT: add sp, sp, #64
+; CHECK-GI-NEXT: ret
%result = call { <2 x float>, <2 x float> } @llvm.modf.v2f32(<2 x float> %a)
ret { <2 x float>, <2 x float> } %result
}
@@ -224,32 +335,80 @@ define { double, double } @test_modf_f64(double %a) {
}
define { <2 x double>, <2 x double> } @test_modf_v2f64(<2 x double> %a) {
-; CHECK-LABEL: test_modf_v2f64:
+; CHECK-SD-LABEL: test_modf_v2f64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #64
+; CHECK-SD-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT: mov d0, v0.d[1]
+; CHECK-SD-NEXT: add x0, sp, #32
+; CHECK-SD-NEXT: add x19, sp, #32
+; CHECK-SD-NEXT: bl modf
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: add x0, sp, #40
+; CHECK-SD-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT: bl modf
+; CHECK-SD-NEXT: ldr d1, [sp, #40]
+; CHECK-SD-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT: ld1 { v1.d }[1], [x19]
+; CHECK-SD-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-SD-NEXT: mov v0.d[1], v2.d[0]
+; CHECK-SD-NEXT: add sp, sp, #64
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_modf_v2f64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #80
+; CHECK-GI-NEXT: str d8, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x30, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w30, -16
+; CHECK-GI-NEXT: .cfi_offset b8, -32
+; CHECK-GI-NEXT: add x0, sp, #40
+; CHECK-GI-NEXT: mov d8, v0.d[1]
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-GI-NEXT: bl modf
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT: add x0, sp, #56
+; CHECK-GI-NEXT: add x19, sp, #56
+; CHECK-GI-NEXT: ldr d0, [sp, #40]
+; CHECK-GI-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: fmov d0, d8
+; CHECK-GI-NEXT: bl modf
+; CHECK-GI-NEXT: ldp q2, q1, [sp] // 32-byte Folded Reload
+; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: ldr d8, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT: mov v2.d[1], v0.d[0]
+; CHECK-GI-NEXT: ld1 { v1.d }[1], [x19]
+; CHECK-GI-NEXT: ldp x30, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v0.16b, v2.16b
+; CHECK-GI-NEXT: add sp, sp, #80
+; CHECK-GI-NEXT: ret
+ %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+define { fp128, fp128 } @test_modf_fp128(fp128 %a) {
+; CHECK-LABEL: test_modf_fp128:
; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: stp x30, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: sub sp, sp, #32
+; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: mov d0, v0.d[1]
-; CHECK-NEXT: add x0, sp, #32
-; CHECK-NEXT: add x19, sp, #32
-; CHECK-NEXT: bl modf
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #40
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: bl modf
-; CHECK-NEXT: ldr d1, [sp, #40]
-; CHECK-NEXT: ldr q2, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: ld1 { v1.d }[1], [x19]
-; CHECK-NEXT: ldp x30, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: mov v0.d[1], v2.d[0]
-; CHECK-NEXT: add sp, sp, #64
+; CHECK-NEXT: mov x0, sp
+; CHECK-NEXT: bl modfl
+; CHECK-NEXT: ldr q1, [sp]
+; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
- %result = call { <2 x double>, <2 x double> } @llvm.modf.v2f64(<2 x double> %a)
- ret { <2 x double>, <2 x double> } %result
+ %result = call { fp128, fp128 } @llvm.modf.fp128(fp128 %a)
+ ret { fp128, fp128 } %result
}
diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
index d60c870..4287507 100644
--- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
@@ -1257,21 +1257,55 @@ entry:
}
define <4 x i32> @partial_reduce_shl_sext_const_rhs6(<16 x i8> %l, <4 x i32> %part) {
-; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-NODOT-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-NODOT: // %bb.0:
+; CHECK-NODOT-NEXT: sshll v2.8h, v0.8b, #0
+; CHECK-NODOT-NEXT: sshll2 v0.8h, v0.16b, #0
+; CHECK-NODOT-NEXT: sshll v3.4s, v0.4h, #6
+; CHECK-NODOT-NEXT: sshll2 v4.4s, v2.8h, #6
+; CHECK-NODOT-NEXT: sshll v2.4s, v2.4h, #6
+; CHECK-NODOT-NEXT: sshll2 v0.4s, v0.8h, #6
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-DOT: // %bb.0:
+; CHECK-DOT-NEXT: movi v2.16b, #64
+; CHECK-DOT-NEXT: sdot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_sext_const_rhs6:
+; CHECK-DOT-I8MM: // %bb.0:
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64
+; CHECK-DOT-I8MM-NEXT: sdot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-I8MM-NEXT: ret
+ %ext = sext <16 x i8> %l to <16 x i32>
+ %shift = shl nsw <16 x i32> %ext, splat (i32 6)
+ %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
+ ret <4 x i32> %red
+}
+
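+; With a shift of 7 the equivalent multiplier is 128, which is not representable
+; as a signed 8-bit element (sdot sign-extends its operands, range -128..127),
+; so the dot-product lowering used for rhs6 does not apply here and all
+; configurations keep the widening extend/shift/add sequence.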
+define <4 x i32> @partial_reduce_shl_sext_const_rhs7(<16 x i8> %l, <4 x i32> %part) {
+; CHECK-COMMON-LABEL: partial_reduce_shl_sext_const_rhs7:
; CHECK-COMMON: // %bb.0:
; CHECK-COMMON-NEXT: sshll v2.8h, v0.8b, #0
; CHECK-COMMON-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #6
-; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #6
-; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #6
-; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #6
+; CHECK-COMMON-NEXT: sshll v3.4s, v0.4h, #7
+; CHECK-COMMON-NEXT: sshll2 v4.4s, v2.8h, #7
+; CHECK-COMMON-NEXT: sshll v2.4s, v2.4h, #7
+; CHECK-COMMON-NEXT: sshll2 v0.4s, v0.8h, #7
; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s
; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-COMMON-NEXT: ret
%ext = sext <16 x i8> %l to <16 x i32>
- %shift = shl nsw <16 x i32> %ext, splat (i32 6)
+ %shift = shl nsw <16 x i32> %ext, splat (i32 7)
%red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
ret <4 x i32> %red
}
@@ -1331,19 +1365,33 @@ define <4 x i32> @partial_reduce_shl_sext_non_const_rhs(<16 x i8> %l, <4 x i32>
}
define <4 x i32> @partial_reduce_shl_zext_const_rhs6(<16 x i8> %l, <4 x i32> %part) {
-; CHECK-COMMON-LABEL: partial_reduce_shl_zext_const_rhs6:
-; CHECK-COMMON: // %bb.0:
-; CHECK-COMMON-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-COMMON-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-COMMON-NEXT: ushll v3.4s, v0.4h, #6
-; CHECK-COMMON-NEXT: ushll2 v4.4s, v2.8h, #6
-; CHECK-COMMON-NEXT: ushll v2.4s, v2.4h, #6
-; CHECK-COMMON-NEXT: ushll2 v0.4s, v0.8h, #6
-; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-COMMON-NEXT: add v2.4s, v4.4s, v3.4s
-; CHECK-COMMON-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-COMMON-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-COMMON-NEXT: ret
+; CHECK-NODOT-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-NODOT: // %bb.0:
+; CHECK-NODOT-NEXT: ushll v2.8h, v0.8b, #0
+; CHECK-NODOT-NEXT: ushll2 v0.8h, v0.16b, #0
+; CHECK-NODOT-NEXT: ushll v3.4s, v0.4h, #6
+; CHECK-NODOT-NEXT: ushll2 v4.4s, v2.8h, #6
+; CHECK-NODOT-NEXT: ushll v2.4s, v2.4h, #6
+; CHECK-NODOT-NEXT: ushll2 v0.4s, v0.8h, #6
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v2.4s, v4.4s, v3.4s
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-NODOT-NEXT: add v0.4s, v1.4s, v0.4s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-DOT: // %bb.0:
+; CHECK-DOT-NEXT: movi v2.16b, #64
+; CHECK-DOT-NEXT: udot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: partial_reduce_shl_zext_const_rhs6:
+; CHECK-DOT-I8MM: // %bb.0:
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #64
+; CHECK-DOT-I8MM-NEXT: udot v1.4s, v0.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: mov v0.16b, v1.16b
+; CHECK-DOT-I8MM-NEXT: ret
%ext = zext <16 x i8> %l to <16 x i32>
%shift = shl nsw <16 x i32> %ext, splat (i32 6)
%red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
diff --git a/llvm/test/CodeGen/AArch64/pr161420.ll b/llvm/test/CodeGen/AArch64/pr161420.ll
new file mode 100644
index 0000000..dcdf0ed
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr161420.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "arm64-apple-macosx15.0.0"
+
+; From: https://github.com/llvm/llvm-project/issues/161420. This test checks that
+; two `luti4` instructions are emitted: the intervening `ldr zt0` changes ZT0, so
+; the second `luti4` must not be CSE'd with the first.
+define void @pluto(ptr %arg, ptr %arg1, ptr %arg2, ptr %arg3) #0 {
+; CHECK-LABEL: pluto:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: mov w8, #0 ; =0x0
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: ldr z4, [x3]
+; CHECK-NEXT: ptrue pn8.h
+; CHECK-NEXT: ld1h { z0.h - z3.h }, pn8/z, [x0]
+; CHECK-NEXT: luti4 { z16.h - z19.h }, zt0, z4[0]
+; CHECK-NEXT: fmla za.h[w8, 0, vgx4], { z0.h - z3.h }, { z16.h - z19.h }
+; CHECK-NEXT: ldr zt0, [x2]
+; CHECK-NEXT: luti4 { z4.h - z7.h }, zt0, z4[0]
+; CHECK-NEXT: fmla za.h[w8, 2, vgx4], { z0.h - z3.h }, { z4.h - z7.h }
+; CHECK-NEXT: ret
+bb:
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg1)
+ %load = load <vscale x 16 x i8>, ptr %arg3, align 16
+ %call = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16()
+ %call4 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %call, ptr %arg)
+ %extractvalue = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 0
+ %extractvalue5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 1
+ %extractvalue6 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 2
+ %extractvalue7 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call4, 3
+ %call8 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0)
+ %extractvalue9 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 0
+ %extractvalue10 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 1
+ %extractvalue11 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 2
+ %extractvalue12 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call8, 3
+ tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue9, <vscale x 8 x half> %extractvalue10, <vscale x 8 x half> %extractvalue11, <vscale x 8 x half> %extractvalue12)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %arg2)
+ %call13 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 0, <vscale x 16 x i8> %load, i32 0)
+ %extractvalue14 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 0
+ %extractvalue15 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 1
+ %extractvalue16 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 2
+ %extractvalue17 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %call13, 3
+ tail call void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32 2, <vscale x 8 x half> %extractvalue, <vscale x 8 x half> %extractvalue5, <vscale x 8 x half> %extractvalue6, <vscale x 8 x half> %extractvalue7, <vscale x 8 x half> %extractvalue14, <vscale x 8 x half> %extractvalue15, <vscale x 8 x half> %extractvalue16, <vscale x 8 x half> %extractvalue17)
+ ret void
+}
+
+declare void @llvm.aarch64.sme.ldr.zt(i32, ptr)
+declare target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c16()
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount"), ptr)
+declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8f16(i32 immarg, <vscale x 16 x i8>, i32 immarg)
+declare void @llvm.aarch64.sme.fmla.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+
+attributes #0 = { mustprogress nofree noinline norecurse nosync nounwind ssp willreturn uwtable(sync) "aarch64_inout_za" "aarch64_inout_zt0" "aarch64_pstate_sm_enabled" "target-cpu"="apple-m1" "target-features"="+fp-armv8,+lse,+neon,+sme,+sme-f16f16,+sme2,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8.5a,+v8a" }
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
index 92d3e11..d48e0cd 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4-lane-x4.ll
@@ -48,6 +48,27 @@ define {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscal
ret {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res
}
+; Tests that multiple identical luti4 intrinsics with ZT0 loads interspersed are not CSE'd.
+define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %x) {
+; CHECK-LABEL: test_multiple_luti4_zt_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr zt0, [x0]
+; CHECK-NEXT: luti4 { z4.s - z7.s }, zt0, z0[1]
+; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: luti4 { z0.s - z3.s }, zt0, z0[1]
+; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3
+; CHECK-NEXT: ret
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA)
+ %res1 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB)
+ %res2 = call {<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4f32(i32 0, <vscale x 16 x i8> %x, i32 1)
+
+ call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res1)
+ call void (...) @llvm.fake.use({<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>} %res2)
+ ret void
+}
+
declare {<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8i16(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv4i32(i32, <vscale x 16 x i8>, i32)
declare {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.aarch64.sme.luti4.lane.zt.x4.nxv8bf16(i32, <vscale x 16 x i8>, i32)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
index 778f311..c1eff8d 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-luti4.ll
@@ -14,4 +14,27 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16
ret {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} %res
}
+; Tests that multiple identical luti4 intrinsics with ZT0 loads interspersed are not CSE'd.
+define void @test_multiple_luti4_zt_i8(ptr %ptrA, ptr %ptrB, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1) #0 {
+; CHECK-LABEL: test_multiple_luti4_zt_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr zt0, [x0]
+; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT: luti4 { z4.b - z7.b }, zt0, { z0, z1 }
+; CHECK-NEXT: // fake_use: $z4 $z4_z5_z6_z7
+; CHECK-NEXT: ldr zt0, [x1]
+; CHECK-NEXT: luti4 { z0.b - z3.b }, zt0, { z0, z1 }
+; CHECK-NEXT: // fake_use: $z0 $z0_z1_z2_z3
+; CHECK-NEXT: ret
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrA)
+ %res1 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1)
+ tail call void @llvm.aarch64.sme.ldr.zt(i32 0, ptr %ptrB)
+ %res2 = call {<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>} @llvm.aarch64.sme.luti4.zt.x4.nxv16i8(i32 0, <vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1)
+
+ call void (...) @llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res1)
+ call void (...) @llvm.fake.use({ <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res2)
+ ret void
+}
+
attributes #0 = { "target-features"="+sme2,+sme-lutv2"}
diff --git a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
index bff0cac..0298168 100644
--- a/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
+++ b/llvm/test/CodeGen/AArch64/spill-fill-zpr-predicates.mir
@@ -983,26 +983,22 @@ body: |
; EXPAND-LABEL: name: zpr_predicate_spill_p4_saved
; EXPAND: liveins: $p0, $p1, $p2, $p3, $fp, $p8, $p4
; EXPAND-NEXT: {{ $}}
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1040, 0
- ; EXPAND-NEXT: frame-setup STRXui killed $fp, $sp, 128 :: (store (s64) into %stack.3)
+ ; EXPAND-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.2)
; EXPAND-NEXT: $sp = frame-setup ADDVL_XXI $sp, -2, implicit $vg
; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p8, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.2)
+ ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 0 :: (store (s128) into %stack.1)
; EXPAND-NEXT: $z0 = frame-setup CPY_ZPzI_B killed $p4, 1, 0
- ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.1)
- ; EXPAND-NEXT: $sp = frame-setup SUBXri $sp, 1024, 0
+ ; EXPAND-NEXT: frame-setup STR_ZXI $z0, $sp, 1 :: (store (s128) into %stack.0)
;
; EXPAND-NEXT: $p8 = IMPLICIT_DEF
;
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1024, 0
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.2)
+ ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 0 :: (load (s128) from %stack.1)
; EXPAND-NEXT: $p4 = frame-destroy PTRUE_B 31, implicit $vg
; EXPAND-NEXT: $p8 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
- ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.1)
+ ; EXPAND-NEXT: $z0 = frame-destroy LDR_ZXI $sp, 1 :: (load (s128) from %stack.0)
; EXPAND-NEXT: $p4 = frame-destroy CMPNE_PPzZI_B $p4, $z0, 0, implicit-def $nzcv, implicit-def $nzcv
; EXPAND-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 2, implicit $vg
- ; EXPAND-NEXT: $fp = frame-destroy LDRXui $sp, 128 :: (load (s64) from %stack.3)
- ; EXPAND-NEXT: $sp = frame-destroy ADDXri $sp, 1040, 0
+ ; EXPAND-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
; EXPAND-NEXT: RET undef $lr, implicit $p0, implicit $p1, implicit $p2, implicit $p3
; If we spill a register above p8, p4 must also be saved, so we can guarantee
diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
index 2b16dd0f..5569175 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
@@ -39,7 +39,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr
; EXPAND: STR_PXI $p0, $sp, 7
@@ -82,7 +82,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr2
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr2
; EXPAND: STR_PXI $p0, $sp, 6
@@ -127,7 +127,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_ppr2
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_ppr2mul2
; EXPAND: STR_PXI $p0, $sp, 6
@@ -172,7 +172,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_pnr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_pnr
; EXPAND: STR_PXI $pn0, $sp, 7
@@ -211,7 +211,7 @@ body: |
; CHECK-LABEL: name: spills_fills_stack_id_virtreg_pnr
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
- ; CHECK-NEXT: stack-id: scalable-vector, callee-saved-register: ''
+ ; CHECK-NEXT: stack-id: scalable-predicate-vector, callee-saved-register: ''
; EXPAND-LABEL: name: spills_fills_stack_id_virtreg_pnr
; EXPAND: renamable $pn8 = WHILEGE_CXX_B
diff --git a/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll
new file mode 100644
index 0000000..690a39d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/split-sve-stack-frame-layout.ll
@@ -0,0 +1,824 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-in-non-streaming -aarch64-split-sve-objects -aarch64-streaming-hazard-size=1024 -pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT
+
+; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; %ppr_local sp+2048+30*vscale (= #15, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding sp+2048+16*vscale
+; <hazard padding> sp+1024+16*vscale
+; %zpr_local sp+1024
+; <hazard padding>
+; -> sp
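+;
+; Offset arithmetic for the stores below (annotation): predicate str/ldr scale
+; their `mul vl` immediate by the predicate size, 2 * vscale bytes, so with
+; x8 = sp + 2048, `str p0, [x8, #15, mul vl]` writes sp + 2048 + 30 * vscale,
+; i.e. %ppr_local. Z-register str/ldr scale by 16 * vscale bytes per unit, so
+; `str z0, [x8]` with x8 = sp + 1024 writes %zpr_local at sp + 1024.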
+define void @zpr_and_ppr_local(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: zpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0x90, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2064 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2048
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: add x8, sp, #1024
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: zpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding fp-16*vscale
+; <hazard padding> fp-1024-16*vscale
+; %zpr_local fp-1024-32*vscale (= #-2, mul vl for str/ldr ZPR)
+; <hazard padding>
+; -> sp
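+;
+; Annotation: with a frame pointer the scaled immediates are negative, e.g.
+; `str p0, [x29, #-1, mul vl]` addresses fp - 2 * vscale and, with
+; x8 = fp - 1024, `str z0, [x8, #-2, mul vl]` addresses fp - 1024 - 32 * vscale.
+; The fixed 1024-byte hazard padding cannot be folded into a `mul vl` immediate
+; (which counts whole vector lengths), so it is materialized with a separate sub.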
+define void @zpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: zpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: sub x8, x29, #1024
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str z0, [x8, #-2, mul vl]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; %ppr_local sp+2064+14*vscale (= #7, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding sp+2064
+; <hazard padding> sp+1040
+; %fpr_local sp+1032
+; 8 bytes of padding sp+1024
+; <hazard padding>
+; -> sp
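+;
+; Note: the 8 bytes of padding below the 8-byte %fpr_local presumably round the
+; FPR area up to 16 bytes, keeping the surrounding regions 16-byte aligned.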
+define void @fpr_and_ppr_local(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: fpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 2080 + 8 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2064
+; CHECK-NEXT: str p0, [x8, #7, mul vl]
+; CHECK-NEXT: str d0, [sp, #1032]
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %fpr_local = alloca double
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile double %double, ptr %fpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: fpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-16 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-16 x vscale], Type: Variable, Align: 16, Size: 1024
+
+; <GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding>
+; %fpr_local sp+1032
+; 8 bytes of padding sp+1024
+; <hazard padding>
+; -> sp
+define void @fpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, double %double) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: fpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str d0, [sp, #1032]
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %fpr_local = alloca double
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile double %double, ptr %fpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; %ppr_local sp+2064+30*vscale (= #15, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding> sp+1040+16*vscale
+; <fpr callee save: z8> sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding
+; -> sp
+define void @gpr_and_ppr_local(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: gpr_and_ppr_local:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xa0, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2080 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+; CHECK-NEXT: add x8, sp, #2064
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: str x0, [sp, #8]
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed
+ %ppr_local = alloca <vscale x 16 x i1>
+ %gpr_local = alloca i64
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile i64 %int, ptr %gpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: gpr_and_ppr_local_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2064-32 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2072-32 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; -> fp
+; %ppr_local fp-2*vscale (= #-1, mul vl for str/ldr PPR)
+; 14 x vscale bytes of padding
+; <hazard padding>
+; <fpr callee save: z8>
+; <hazard padding>
+; %gpr_local sp+8
+; 8 bytes of padding
+; -> sp
+define void @gpr_and_ppr_local_fp(<vscale x 16 x i1> %pred, i64 %int) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
+; CHECK-LABEL: gpr_and_ppr_local_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str z8, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 16 * VG - 1040
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: str p0, [x29, #-1, mul vl]
+; CHECK-NEXT: str x0, [sp, #8]
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ldr z8, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ tail call void asm sideeffect "", "~{d8}"() #1 ; Spill an FPR so hazard padding is needed
+ %ppr_local = alloca <vscale x 16 x i1>
+ %gpr_local = alloca i64
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile i64 %int, ptr %gpr_local
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1040-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1048-320 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2080-320 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2088-320 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; <CS PPRs>
+; %ppr_local sp+2080+286*vscale (addvl #17, addpl #7)
+; 14 x vscale bytes of padding   sp+2080+272*vscale
+; <hazard padding> sp+1056+272*vscale
+; <CS ZPRs> sp+1056+16*vscale
+; %zpr_local sp+1056
+; %fpr_local sp+1048
+; 8 bytes of padding sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding sp
+; -> sp
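+;
+; Note: addvl steps by 16 x vscale bytes and addpl by 2 x vscale bytes, so
+; "addvl #17, addpl #7" from the base sp+2080 gives
+; sp+2080+(17*16+7*2)*vscale = sp+2080+286*vscale for %ppr_local.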
+define void @all_stack_areas(<vscale x 16 x i1> %pred, double %fp) {
+; CHECK-LABEL: all_stack_areas:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0b, 0x8f, 0xb0, 0x10, 0x92, 0x2e, 0x00, 0x11, 0xa0, 0x01, 0x1e, 0x22 // sp + 2096 + 160 * VG
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1040
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xf0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1040
+; CHECK-NEXT: add x0, sp, #2080
+; CHECK-NEXT: add x8, sp, #2080
+; CHECK-NEXT: add x1, sp, #1056
+; CHECK-NEXT: addvl x0, x0, #17
+; CHECK-NEXT: add x2, sp, #1048
+; CHECK-NEXT: add x3, sp, #8
+; CHECK-NEXT: addpl x0, x0, #7
+; CHECK-NEXT: str d0, [sp, #1048]
+; CHECK-NEXT: str p0, [x8, #143, mul vl]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: add sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %fpr_local = alloca double
+  ; Needed to sort %fpr_local into the FPR region
+ store double %fp, ptr %fpr_local
+  ; Needed to sort %ppr_local into the PPR region
+ store <vscale x 16 x i1> %pred, ptr %ppr_local
+ %gpr_local = alloca i64
+ call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local)
+ ret void
+}
+declare void @foo(ptr, ptr, ptr, ptr)
+
+; CHECK-FRAMELAYOUT-LABEL: Function: all_stack_areas_fp
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-34 x vscale], Type: Variable, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-304 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1056-320 x vscale], Type: Variable, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1064-320 x vscale], Type: Variable, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2096-320 x vscale], Type: Variable, Align: 16, Size: 1024
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2104-320 x vscale], Type: Variable, Align: 8, Size: 8
+
+; <CS GPRs>
+; -> fp
+; <CS PPRs> fp-32*vscale
+; %ppr_local fp-34*vscale (addpl #-17)
+; 14 x vscale bytes of padding        fp-48*vscale
+; <hazard padding> fp-1024-48*vscale
+; <CS ZPRs> fp-1024-304*vscale
+; %zpr_local                          fp-1024-320*vscale (addvl #-20)
+; %fpr_local sp+1048
+; 8 bytes of padding sp+1040
+; <hazard padding> sp+16
+; %gpr_local sp+8
+; 8 bytes of padding sp
+; -> sp
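+;
+; Note: %zpr_local is addressed from the frame pointer here: x29-1024 followed
+; by "addvl #-20" gives fp-1024-320*vscale (20 x 16 = 320).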
+define void @all_stack_areas_fp(<vscale x 16 x i1> %pred, double %fp) "frame-pointer"="all" {
+; CHECK-LABEL: all_stack_areas_fp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x28, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-17
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w28, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d8 @ cfa - 32 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d9 @ cfa - 40 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d10 @ cfa - 48 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d11 @ cfa - 56 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0b, 0x92, 0x2e, 0x00, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d12 @ cfa - 64 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d13 @ cfa - 72 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d14 @ cfa - 80 * VG - 1056
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x92, 0x2e, 0x00, 0x11, 0xa8, 0x7f, 0x1e, 0x22, 0x11, 0xe0, 0x77, 0x22 // $d15 @ cfa - 88 * VG - 1056
+; CHECK-NEXT: sub x1, x29, #1024
+; CHECK-NEXT: addpl x0, x29, #-17
+; CHECK-NEXT: add x2, sp, #1048
+; CHECK-NEXT: addvl x1, x1, #-20
+; CHECK-NEXT: add x3, sp, #8
+; CHECK-NEXT: str d0, [sp, #1048]
+; CHECK-NEXT: str p0, [x29, #-17, mul vl]
+; CHECK-NEXT: bl foo
+; CHECK-NEXT: add sp, sp, #1056
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #17
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x28, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %fpr_local = alloca double
+  ; Needed to sort %fpr_local into the FPR region
+ store double %fp, ptr %fpr_local
+  ; Needed to sort %ppr_local into the PPR region
+ store <vscale x 16 x i1> %pred, ptr %ppr_local
+ %gpr_local = alloca i64
+ call void @foo(ptr %ppr_local, ptr %zpr_local, ptr %fpr_local, ptr %gpr_local)
+ ret void
+}
+
+; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 16, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-2 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-4 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-6 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-8 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-10 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-12 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-14 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-16 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-18 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-20 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-22 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-24 x vscale], Type: Spill, Align: 2, Size: vscale x 2
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-272 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-1088-288 x vscale], Type: Spill, Align: 16, Size: vscale x 16
+; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-2112-288 x vscale], Type: Variable, Align: 16, Size: 1024
+
+define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: svecc_call:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 64
+; CHECK-NEXT: cntd x9
+; CHECK-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 64
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w26, -16
+; CHECK-NEXT: .cfi_offset w27, -24
+; CHECK-NEXT: .cfi_offset w28, -32
+; CHECK-NEXT: .cfi_offset vg, -48
+; CHECK-NEXT: .cfi_offset w30, -56
+; CHECK-NEXT: .cfi_offset w29, -64
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-16
+; CHECK-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: mov x8, x0
+; CHECK-NEXT: bl __arm_sme_state
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: //APP
+; CHECK-NEXT: //NO_APP
+; CHECK-NEXT: tbz w19, #0, .LBB8_2
+; CHECK-NEXT: // %bb.1: // %entry
+; CHECK-NEXT: smstop sm
+; CHECK-NEXT: .LBB8_2: // %entry
+; CHECK-NEXT: mov x0, x8
+; CHECK-NEXT: mov w1, #45 // =0x2d
+; CHECK-NEXT: mov w2, #37 // =0x25
+; CHECK-NEXT: bl memset
+; CHECK-NEXT: tbz w19, #0, .LBB8_4
+; CHECK-NEXT: // %bb.3: // %entry
+; CHECK-NEXT: smstart sm
+; CHECK-NEXT: .LBB8_4: // %entry
+; CHECK-NEXT: mov w0, #22647 // =0x5877
+; CHECK-NEXT: movk w0, #59491, lsl #16
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #16
+; CHECK-NEXT: .cfi_restore z8
+; CHECK-NEXT: .cfi_restore z9
+; CHECK-NEXT: .cfi_restore z10
+; CHECK-NEXT: .cfi_restore z11
+; CHECK-NEXT: .cfi_restore z12
+; CHECK-NEXT: .cfi_restore z13
+; CHECK-NEXT: .cfi_restore z14
+; CHECK-NEXT: .cfi_restore z15
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: .cfi_def_cfa wsp, 64
+; CHECK-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w19
+; CHECK-NEXT: .cfi_restore w26
+; CHECK-NEXT: .cfi_restore w27
+; CHECK-NEXT: .cfi_restore w28
+; CHECK-NEXT: .cfi_restore vg
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .cfi_restore w29
+; CHECK-NEXT: ret
+entry:
+ tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
+ %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
+ ret i32 -396142473
+}
+declare ptr @memset(ptr, i32, i32)
+
+; FIXME: aarch64-split-sve-objects is currently not supported in this function
+; as it requires stack realignment (for the 32-byte aligned alloca).
+; GPR CSRs
+; <hazard padding>
+; FPR CSRs
+; <hazard padding>
+; <SVE locals (PPRs and ZPRs)> <--- hazard between PPRs and ZPRs here!
+; <realignment padding>
+; -> sp
+define void @zpr_and_ppr_local_realignment(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr) "aarch64_pstate_sm_compatible" {
+; CHECK-LABEL: zpr_and_ppr_local_realignment:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub sp, sp, #1040
+; CHECK-NEXT: sub x9, sp, #1040
+; CHECK-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK-NEXT: add x29, sp, #1024
+; CHECK-NEXT: addvl x9, x9, #-2
+; CHECK-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK-NEXT: and sp, x9, #0xffffffffffffffe0
+; CHECK-NEXT: .cfi_def_cfa w29, 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: sub x8, x29, #1024
+; CHECK-NEXT: str p0, [x8, #-1, mul vl]
+; CHECK-NEXT: str z0, [x8, #-2, mul vl]
+; CHECK-NEXT: str x0, [sp]
+; CHECK-NEXT: sub sp, x29, #1024
+; CHECK-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #1040
+; CHECK-NEXT: ret
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %gpr_local = alloca i64, align 32
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ store volatile i64 %gpr, ptr %gpr_local
+ ret void
+}
+
+define void @zpr_and_ppr_local_stack_probing(<vscale x 16 x i1> %pred, <vscale x 16 x i8> %vector, i64 %gpr)
+; CHECK-LABEL: zpr_and_ppr_local_stack_probing:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: sub sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str xzr, [sp]
+; CHECK-NEXT: sub sp, sp, #1824
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str xzr, [sp]
+; CHECK-NEXT: .cfi_escape 0x0f, 0x09, 0x8f, 0xb0, 0x16, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 2864 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: add x8, sp, #2848
+; CHECK-NEXT: str p0, [x8, #15, mul vl]
+; CHECK-NEXT: add x8, sp, #1824
+; CHECK-NEXT: str z0, [x8]
+; CHECK-NEXT: str x0, [sp]
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1024
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: add sp, sp, #1824
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ "probe-stack"="inline-asm" "stack-probe-size"="4096" "frame-pointer"="none" "aarch64_pstate_sm_compatible"
+{
+ %ppr_local = alloca <vscale x 16 x i1>
+ %zpr_local = alloca <vscale x 16 x i8>
+ %gpr_local = alloca i64, i64 100, align 8
+ store volatile <vscale x 16 x i1> %pred, ptr %ppr_local
+ store volatile <vscale x 16 x i8> %vector, ptr %zpr_local
+ store volatile i64 %gpr, ptr %gpr_local
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 5f52280..333a8be 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=0 | FileCheck %s --check-prefixes=CHECK,CHECK0
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=64 | FileCheck %s --check-prefixes=CHECK,CHECK64
-; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-NOSPLITSVE
+; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-split-sve-objects -aarch64-stack-hazard-size=1024 | FileCheck %s --check-prefixes=CHECK,CHECK1024,CHECK1024-SPLITSVE
define i32 @basic(i32 noundef %num) {
; CHECK-LABEL: basic:
@@ -1503,72 +1504,24 @@ define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>
}
define [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1_caller([2 x <vscale x 4 x i1>] %arg1, [2 x <vscale x 4 x i1>] %arg2) nounwind "aarch64_pstate_sm_compatible" {
-; CHECK0-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK0: // %bb.0:
-; CHECK0-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK0-NEXT: addvl sp, sp, #-1
-; CHECK0-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK0-NEXT: mov p5.b, p0.b
-; CHECK0-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK0-NEXT: mov p4.b, p1.b
-; CHECK0-NEXT: mov p0.b, p2.b
-; CHECK0-NEXT: mov p1.b, p3.b
-; CHECK0-NEXT: mov p2.b, p5.b
-; CHECK0-NEXT: mov p3.b, p4.b
-; CHECK0-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK0-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK0-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK0-NEXT: addvl sp, sp, #1
-; CHECK0-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
-; CHECK0-NEXT: ret
-;
-; CHECK64-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK64: // %bb.0:
-; CHECK64-NEXT: sub sp, sp, #80
-; CHECK64-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
-; CHECK64-NEXT: addvl sp, sp, #-1
-; CHECK64-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK64-NEXT: sub sp, sp, #64
-; CHECK64-NEXT: mov p4.b, p1.b
-; CHECK64-NEXT: mov p5.b, p0.b
-; CHECK64-NEXT: mov p0.b, p2.b
-; CHECK64-NEXT: mov p1.b, p3.b
-; CHECK64-NEXT: mov p2.b, p5.b
-; CHECK64-NEXT: mov p3.b, p4.b
-; CHECK64-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK64-NEXT: add sp, sp, #64
-; CHECK64-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK64-NEXT: addvl sp, sp, #1
-; CHECK64-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
-; CHECK64-NEXT: add sp, sp, #80
-; CHECK64-NEXT: ret
-;
-; CHECK1024-LABEL: sve_signature_pred_2xv4i1_caller:
-; CHECK1024: // %bb.0:
-; CHECK1024-NEXT: sub sp, sp, #1040
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: addvl sp, sp, #-1
-; CHECK1024-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: mov p4.b, p1.b
-; CHECK1024-NEXT: mov p5.b, p0.b
-; CHECK1024-NEXT: mov p0.b, p2.b
-; CHECK1024-NEXT: mov p1.b, p3.b
-; CHECK1024-NEXT: mov p2.b, p5.b
-; CHECK1024-NEXT: mov p3.b, p4.b
-; CHECK1024-NEXT: bl sve_signature_pred_2xv4i1
-; CHECK1024-NEXT: add sp, sp, #1024
-; CHECK1024-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #1
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1040
-; CHECK1024-NEXT: ret
+; CHECK-LABEL: sve_signature_pred_2xv4i1_caller:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p5.b, p0.b
+; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT: mov p4.b, p1.b
+; CHECK-NEXT: mov p0.b, p2.b
+; CHECK-NEXT: mov p1.b, p3.b
+; CHECK-NEXT: mov p2.b, p5.b
+; CHECK-NEXT: mov p3.b, p4.b
+; CHECK-NEXT: bl sve_signature_pred_2xv4i1
+; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: addvl sp, sp, #1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
%res = call [2 x <vscale x 4 x i1>] @sve_signature_pred_2xv4i1([2 x <vscale x 4 x i1>] %arg2, [2 x <vscale x 4 x i1>] %arg1)
ret [2 x <vscale x 4 x i1>] %res
}
@@ -2113,139 +2066,269 @@ define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3,
; CHECK64-NEXT: .cfi_restore w29
; CHECK64-NEXT: ret
;
-; CHECK1024-LABEL: svecc_call:
-; CHECK1024: // %bb.0: // %entry
-; CHECK1024-NEXT: sub sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
-; CHECK1024-NEXT: cntd x9
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
-; CHECK1024-NEXT: add x29, sp, #1024
-; CHECK1024-NEXT: .cfi_def_cfa w29, 64
-; CHECK1024-NEXT: .cfi_offset w19, -16
-; CHECK1024-NEXT: .cfi_offset w26, -24
-; CHECK1024-NEXT: .cfi_offset w27, -32
-; CHECK1024-NEXT: .cfi_offset w28, -40
-; CHECK1024-NEXT: .cfi_offset vg, -48
-; CHECK1024-NEXT: .cfi_offset w30, -56
-; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
-; CHECK1024-NEXT: sub sp, sp, #1024
-; CHECK1024-NEXT: mov x8, x0
-; CHECK1024-NEXT: bl __arm_sme_state
-; CHECK1024-NEXT: mov x19, x0
-; CHECK1024-NEXT: //APP
-; CHECK1024-NEXT: //NO_APP
-; CHECK1024-NEXT: tbz w19, #0, .LBB28_2
-; CHECK1024-NEXT: // %bb.1: // %entry
-; CHECK1024-NEXT: smstop sm
-; CHECK1024-NEXT: .LBB28_2: // %entry
-; CHECK1024-NEXT: mov x0, x8
-; CHECK1024-NEXT: mov w1, #45 // =0x2d
-; CHECK1024-NEXT: mov w2, #37 // =0x25
-; CHECK1024-NEXT: bl memset
-; CHECK1024-NEXT: tbz w19, #0, .LBB28_4
-; CHECK1024-NEXT: // %bb.3: // %entry
-; CHECK1024-NEXT: smstart sm
-; CHECK1024-NEXT: .LBB28_4: // %entry
-; CHECK1024-NEXT: mov w0, #22647 // =0x5877
-; CHECK1024-NEXT: movk w0, #59491, lsl #16
-; CHECK1024-NEXT: add sp, sp, #1024
-; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #18
-; CHECK1024-NEXT: .cfi_restore z8
-; CHECK1024-NEXT: .cfi_restore z9
-; CHECK1024-NEXT: .cfi_restore z10
-; CHECK1024-NEXT: .cfi_restore z11
-; CHECK1024-NEXT: .cfi_restore z12
-; CHECK1024-NEXT: .cfi_restore z13
-; CHECK1024-NEXT: .cfi_restore z14
-; CHECK1024-NEXT: .cfi_restore z15
-; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
-; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 0
-; CHECK1024-NEXT: .cfi_restore w19
-; CHECK1024-NEXT: .cfi_restore w26
-; CHECK1024-NEXT: .cfi_restore w27
-; CHECK1024-NEXT: .cfi_restore w28
-; CHECK1024-NEXT: .cfi_restore vg
-; CHECK1024-NEXT: .cfi_restore w30
-; CHECK1024-NEXT: .cfi_restore w29
-; CHECK1024-NEXT: ret
+; CHECK1024-NOSPLITSVE-LABEL: svecc_call:
+; CHECK1024-NOSPLITSVE: // %bb.0: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NOSPLITSVE-NEXT: cntd x9
+; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18
+; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: mov x8, x0
+; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0
+; CHECK1024-NOSPLITSVE-NEXT: //APP
+; CHECK1024-NOSPLITSVE-NEXT: //NO_APP
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_2
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstop sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB28_2: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov x0, x8
+; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-NOSPLITSVE-NEXT: bl memset
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB28_4
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstart sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB28_4: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-NOSPLITSVE-NEXT: ret
+;
+; CHECK1024-SPLITSVE-LABEL: svecc_call:
+; CHECK1024-SPLITSVE: // %bb.0: // %entry
+; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64
+; CHECK1024-SPLITSVE-NEXT: cntd x9
+; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: mov x29, sp
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2
+; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16
+; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: mov x8, x0
+; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-SPLITSVE-NEXT: mov x19, x0
+; CHECK1024-SPLITSVE-NEXT: //APP
+; CHECK1024-SPLITSVE-NEXT: //NO_APP
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_2
+; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstop sm
+; CHECK1024-SPLITSVE-NEXT: .LBB28_2: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov x0, x8
+; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-SPLITSVE-NEXT: bl memset
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB28_4
+; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstart sm
+; CHECK1024-SPLITSVE-NEXT: .LBB28_4: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64
+; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-SPLITSVE-NEXT: ret
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
%call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
@@ -2505,138 +2588,267 @@ define i32 @svecc_alloca_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8
; CHECK64-NEXT: .cfi_restore w29
; CHECK64-NEXT: ret
;
-; CHECK1024-LABEL: svecc_alloca_call:
-; CHECK1024: // %bb.0: // %entry
-; CHECK1024-NEXT: sub sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 1088
-; CHECK1024-NEXT: cntd x9
-; CHECK1024-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
-; CHECK1024-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
-; CHECK1024-NEXT: add x29, sp, #1024
-; CHECK1024-NEXT: .cfi_def_cfa w29, 64
-; CHECK1024-NEXT: .cfi_offset w19, -16
-; CHECK1024-NEXT: .cfi_offset w26, -24
-; CHECK1024-NEXT: .cfi_offset w27, -32
-; CHECK1024-NEXT: .cfi_offset w28, -40
-; CHECK1024-NEXT: .cfi_offset vg, -48
-; CHECK1024-NEXT: .cfi_offset w30, -56
-; CHECK1024-NEXT: .cfi_offset w29, -64
-; CHECK1024-NEXT: addvl sp, sp, #-18
-; CHECK1024-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
-; CHECK1024-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
-; CHECK1024-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
-; CHECK1024-NEXT: sub sp, sp, #1072
-; CHECK1024-NEXT: bl __arm_sme_state
-; CHECK1024-NEXT: mov x19, x0
-; CHECK1024-NEXT: //APP
-; CHECK1024-NEXT: //NO_APP
-; CHECK1024-NEXT: tbz w19, #0, .LBB29_2
-; CHECK1024-NEXT: // %bb.1: // %entry
-; CHECK1024-NEXT: smstop sm
-; CHECK1024-NEXT: .LBB29_2: // %entry
-; CHECK1024-NEXT: mov x0, sp
-; CHECK1024-NEXT: mov w1, #45 // =0x2d
-; CHECK1024-NEXT: mov w2, #37 // =0x25
-; CHECK1024-NEXT: bl memset
-; CHECK1024-NEXT: tbz w19, #0, .LBB29_4
-; CHECK1024-NEXT: // %bb.3: // %entry
-; CHECK1024-NEXT: smstart sm
-; CHECK1024-NEXT: .LBB29_4: // %entry
-; CHECK1024-NEXT: mov w0, #22647 // =0x5877
-; CHECK1024-NEXT: movk w0, #59491, lsl #16
-; CHECK1024-NEXT: add sp, sp, #1072
-; CHECK1024-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
-; CHECK1024-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
-; CHECK1024-NEXT: addvl sp, sp, #18
-; CHECK1024-NEXT: .cfi_restore z8
-; CHECK1024-NEXT: .cfi_restore z9
-; CHECK1024-NEXT: .cfi_restore z10
-; CHECK1024-NEXT: .cfi_restore z11
-; CHECK1024-NEXT: .cfi_restore z12
-; CHECK1024-NEXT: .cfi_restore z13
-; CHECK1024-NEXT: .cfi_restore z14
-; CHECK1024-NEXT: .cfi_restore z15
-; CHECK1024-NEXT: .cfi_def_cfa wsp, 1088
-; CHECK1024-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
-; CHECK1024-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
-; CHECK1024-NEXT: add sp, sp, #1088
-; CHECK1024-NEXT: .cfi_def_cfa_offset 0
-; CHECK1024-NEXT: .cfi_restore w19
-; CHECK1024-NEXT: .cfi_restore w26
-; CHECK1024-NEXT: .cfi_restore w27
-; CHECK1024-NEXT: .cfi_restore w28
-; CHECK1024-NEXT: .cfi_restore vg
-; CHECK1024-NEXT: .cfi_restore w30
-; CHECK1024-NEXT: .cfi_restore w29
-; CHECK1024-NEXT: ret
+; CHECK1024-NOSPLITSVE-LABEL: svecc_alloca_call:
+; CHECK1024-NOSPLITSVE: // %bb.0: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 1088
+; CHECK1024-NOSPLITSVE-NEXT: cntd x9
+; CHECK1024-NOSPLITSVE-NEXT: str x29, [sp, #1024] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x30, [sp, #1032] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x9, [sp, #1040] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x28, [sp, #1048] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x27, [sp, #1056] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x26, [sp, #1064] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str x19, [sp, #1072] // 8-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: add x29, sp, #1024
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w19, -16
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w26, -24
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w27, -32
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w28, -40
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #-18
+; CHECK1024-NOSPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 8 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 16 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-NOSPLITSVE-NEXT: sub sp, sp, #1072
+; CHECK1024-NOSPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-NOSPLITSVE-NEXT: mov x19, x0
+; CHECK1024-NOSPLITSVE-NEXT: //APP
+; CHECK1024-NOSPLITSVE-NEXT: //NO_APP
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_2
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstop sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB29_2: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov x0, sp
+; CHECK1024-NOSPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-NOSPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-NOSPLITSVE-NEXT: bl memset
+; CHECK1024-NOSPLITSVE-NEXT: tbz w19, #0, .LBB29_4
+; CHECK1024-NOSPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: smstart sm
+; CHECK1024-NOSPLITSVE-NEXT: .LBB29_4: // %entry
+; CHECK1024-NOSPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-NOSPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1072
+; CHECK1024-NOSPLITSVE-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: addvl sp, sp, #18
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa wsp, 1088
+; CHECK1024-NOSPLITSVE-NEXT: ldr x19, [sp, #1072] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x26, [sp, #1064] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x27, [sp, #1056] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x28, [sp, #1048] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x30, [sp, #1032] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: ldr x29, [sp, #1024] // 8-byte Folded Reload
+; CHECK1024-NOSPLITSVE-NEXT: add sp, sp, #1088
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-NOSPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-NOSPLITSVE-NEXT: ret
+;
+; CHECK1024-SPLITSVE-LABEL: svecc_alloca_call:
+; CHECK1024-SPLITSVE: // %bb.0: // %entry
+; CHECK1024-SPLITSVE-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 64
+; CHECK1024-SPLITSVE-NEXT: cntd x9
+; CHECK1024-SPLITSVE-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: mov x29, sp
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa w29, 64
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w19, -8
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w26, -16
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w27, -24
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w28, -32
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset vg, -48
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w30, -56
+; CHECK1024-SPLITSVE-NEXT: .cfi_offset w29, -64
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-2
+; CHECK1024-SPLITSVE-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #-16
+; CHECK1024-SPLITSVE-NEXT: str z23, [sp] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z22, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z21, [sp, #2, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z20, [sp, #3, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z19, [sp, #4, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z18, [sp, #5, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z17, [sp, #6, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z16, [sp, #7, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z15, [sp, #8, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z14, [sp, #9, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z13, [sp, #10, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z12, [sp, #11, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z11, [sp, #12, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z10, [sp, #13, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z9, [sp, #14, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: str z8, [sp, #15, mul vl] // 16-byte Folded Spill
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x48, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d8 @ cfa - 24 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x49, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d9 @ cfa - 32 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4a, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d10 @ cfa - 40 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4b, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d11 @ cfa - 48 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4c, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d12 @ cfa - 56 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4d, 0x0d, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d13 @ cfa - 64 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4e, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb8, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d14 @ cfa - 72 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: .cfi_escape 0x10, 0x4f, 0x0e, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0xb0, 0x7f, 0x1e, 0x22, 0x11, 0xc0, 0x77, 0x22 // $d15 @ cfa - 80 * IncomingVG - 1088
+; CHECK1024-SPLITSVE-NEXT: sub sp, sp, #1072
+; CHECK1024-SPLITSVE-NEXT: bl __arm_sme_state
+; CHECK1024-SPLITSVE-NEXT: mov x19, x0
+; CHECK1024-SPLITSVE-NEXT: //APP
+; CHECK1024-SPLITSVE-NEXT: //NO_APP
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_2
+; CHECK1024-SPLITSVE-NEXT: // %bb.1: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstop sm
+; CHECK1024-SPLITSVE-NEXT: .LBB29_2: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov x0, sp
+; CHECK1024-SPLITSVE-NEXT: mov w1, #45 // =0x2d
+; CHECK1024-SPLITSVE-NEXT: mov w2, #37 // =0x25
+; CHECK1024-SPLITSVE-NEXT: bl memset
+; CHECK1024-SPLITSVE-NEXT: tbz w19, #0, .LBB29_4
+; CHECK1024-SPLITSVE-NEXT: // %bb.3: // %entry
+; CHECK1024-SPLITSVE-NEXT: smstart sm
+; CHECK1024-SPLITSVE-NEXT: .LBB29_4: // %entry
+; CHECK1024-SPLITSVE-NEXT: mov w0, #22647 // =0x5877
+; CHECK1024-SPLITSVE-NEXT: movk w0, #59491, lsl #16
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1072
+; CHECK1024-SPLITSVE-NEXT: ldr z23, [sp] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z22, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z21, [sp, #2, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z20, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z19, [sp, #4, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z18, [sp, #5, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z17, [sp, #6, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z16, [sp, #7, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z15, [sp, #8, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z14, [sp, #9, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z13, [sp, #10, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z12, [sp, #11, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr z8, [sp, #15, mul vl] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: add sp, sp, #1024
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #16
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z8
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z9
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z10
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z11
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z12
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z13
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z14
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore z15
+; CHECK1024-SPLITSVE-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: addvl sp, sp, #2
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa wsp, 64
+; CHECK1024-SPLITSVE-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
+; CHECK1024-SPLITSVE-NEXT: .cfi_def_cfa_offset 0
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w19
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w26
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w27
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w28
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore vg
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w30
+; CHECK1024-SPLITSVE-NEXT: .cfi_restore w29
+; CHECK1024-SPLITSVE-NEXT: ret
entry:
tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index 7bddd1d..cc63c7f 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -56,9 +56,9 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<
; CHECK: name: caller_with_many_svepred_arg
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector
+; CHECK-NEXT: stack-id: scalable-predicate-vector
; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
; CHECK-DAG: [[BASE1:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0
@@ -90,7 +90,7 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_svepred_arg_1xv16i
; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED0:%[0-9]+]]:ppr = COPY $p0
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
; CHECK: STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
@@ -139,7 +139,7 @@ define [4 x <vscale x 16 x i1>] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x <v
; CHECK: name: caller_with_svepred_arg_4xv16i1_4xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
@@ -200,7 +200,7 @@ define [2 x <vscale x 32 x i1>] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x <v
; CHECK: name: caller_with_svepred_arg_2xv32i1_1xv16i1
; CHECK: stack:
; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT: stack-id: scalable-vector,
+; CHECK-NEXT: stack-id: scalable-predicate-vector,
; CHECK: [[PRED3:%[0-9]+]]:ppr = COPY $p3
; CHECK: [[PRED2:%[0-9]+]]:ppr = COPY $p2
; CHECK: [[PRED1:%[0-9]+]]:ppr = COPY $p1
diff --git a/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll b/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll
new file mode 100644
index 0000000..584753b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-load-store-legalisation.ll
@@ -0,0 +1,2854 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mattr=+sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @sve_load_store_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ store <vscale x 1 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ store <vscale x 2 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ store <vscale x 3 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ store <vscale x 4 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ store <vscale x 5 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1b { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1b { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ store <vscale x 6 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ store <vscale x 7 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ store <vscale x 8 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ store <vscale x 9 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ store <vscale x 10 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ store <vscale x 11 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1]
+; CHECK-NEXT: st1b { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ store <vscale x 12 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ store <vscale x 13 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ptrue p2.h
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: ld1b { z1.h }, p2/z, [x0]
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p2, [x1]
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: st1b { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z2.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ store <vscale x 14 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ store <vscale x 15 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ store <vscale x 16 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv17i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv17i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #17 // =0x11
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 17 x i8>, ptr %a
+ store <vscale x 17 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv18i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv18i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z2.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s
+; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z2.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z2.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z0.s, z2.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: uzp1 z2.s, z2.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1b { z0.d }, p0, [x1, x8]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 18 x i8>, ptr %a
+ store <vscale x 18 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv19i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv19i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #19 // =0x13
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 19 x i8>, ptr %a
+ store <vscale x 19 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv20i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv20i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1b { z1.s }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 20 x i8>, ptr %a
+ store <vscale x 20 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv21i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv21i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #21 // =0x15
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 21 x i8>, ptr %a
+ store <vscale x 21 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv22i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv22i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: cntw x8, all, mul #5
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: ld1b { z1.d }, p1/z, [x0, x8]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z1.b, z0.b, z1.b
+; CHECK-NEXT: uunpkhi z1.h, z1.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uzp1 z1.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p1, [x1, x8]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 22 x i8>, ptr %a
+ store <vscale x 22 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv23i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv23i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #23 // =0x17
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 23 x i8>, ptr %a
+ store <vscale x 23 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv24i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv24i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: st1b { z0.h }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 24 x i8>, ptr %a
+ store <vscale x 24 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv25i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv25i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #25 // =0x19
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 25 x i8>, ptr %a
+ store <vscale x 25 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv26i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv26i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cnth x8, all, mul #3
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1b { z1.d }, p0, [x1, x8]
+; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 26 x i8>, ptr %a
+ store <vscale x 26 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv27i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv27i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #27 // =0x1b
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 27 x i8>, ptr %a
+ store <vscale x 27 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv28i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv28i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.h
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: st1b { z0.h }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z1.s }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 28 x i8>, ptr %a
+ store <vscale x 28 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv29i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv29i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #29 // =0x1d
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 29 x i8>, ptr %a
+ store <vscale x 29 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv30i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv30i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cntw x8, all, mul #7
+; CHECK-NEXT: ldr z3, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0, x8]
+; CHECK-NEXT: ptrue p2.h
+; CHECK-NEXT: ld1b { z1.s }, p1/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1b { z2.h }, p2/z, [x0, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uzp1 z0.b, z2.b, z0.b
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uunpkhi z2.s, z1.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEXT: st1b { z2.d }, p0, [x1, x8]
+; CHECK-NEXT: st1b { z0.h }, p2, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z1.s }, p1, [x1, #6, mul vl]
+; CHECK-NEXT: str z3, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 30 x i8>, ptr %a
+ store <vscale x 30 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv31i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv31i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w10, #31 // =0x1f
+; CHECK-NEXT: lsr x9, x8, #4
+; CHECK-NEXT: mul x9, x9, x10
+; CHECK-NEXT: whilelo p0.b, x8, x9
+; CHECK-NEXT: whilelo p1.b, xzr, x9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p1/z, [x0]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 31 x i8>, ptr %a
+ store <vscale x 31 x i8> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv32i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv32i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 32 x i8>, ptr %a
+ store <vscale x 32 x i8> %c, ptr %b
+ ret void
+}
+
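+; Loads and stores of <vscale x N x i16>, N = 1..16.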
+define void @sve_load_store_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ store <vscale x 1 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ store <vscale x 2 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ store <vscale x 3 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ store <vscale x 4 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ store <vscale x 5 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ store <vscale x 6 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ store <vscale x 7 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ store <vscale x 8 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i16>, ptr %a
+ store <vscale x 9 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z1.h, z0.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z0.s, z1.s
+; CHECK-NEXT: uzp1 z1.h, z0.h, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i16>, ptr %a
+ store <vscale x 10 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i16>, ptr %a
+ store <vscale x 11 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: st1h { z0.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i16>, ptr %a
+ store <vscale x 12 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i16>, ptr %a
+ store <vscale x 13 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i16>, ptr %a
+ store <vscale x 14 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i16>, ptr %a
+ store <vscale x 15 x i16> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i16>, ptr %a
+ store <vscale x 16 x i16> %c, ptr %b
+ ret void
+}
+
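+; Loads and stores of <vscale x N x i32>, N = 1..8.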
+define void @sve_load_store_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ store <vscale x 1 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ store <vscale x 2 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ store <vscale x 3 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ store <vscale x 4 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i32>, ptr %a
+ store <vscale x 5 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1w { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i32>, ptr %a
+ store <vscale x 6 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i32>, ptr %a
+ store <vscale x 7 x i32> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i32>, ptr %a
+ store <vscale x 8 x i32> %c, ptr %b
+ ret void
+}
+
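+; Loads and stores of <vscale x N x i64>, N = 1..4.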
+define void @sve_load_store_nxv1i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i64>, ptr %a
+ store <vscale x 1 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i64>, ptr %a
+ store <vscale x 2 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i64>, ptr %a
+ store <vscale x 3 x i64> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4i64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i64>, ptr %a
+ store <vscale x 4 x i64> %c, ptr %b
+ ret void
+}
+
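+; Loads and stores of <vscale x N x half>, N = 1..16.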
+define void @sve_load_store_nxv1f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x half>, ptr %a
+ store <vscale x 1 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x half>, ptr %a
+ store <vscale x 2 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x half>, ptr %a
+ store <vscale x 3 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x half>, ptr %a
+ store <vscale x 4 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x half>, ptr %a
+ store <vscale x 5 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x half>, ptr %a
+ store <vscale x 6 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x half>, ptr %a
+ store <vscale x 7 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x half>, ptr %a
+ store <vscale x 8 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x half>, ptr %a
+ store <vscale x 9 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x half>, ptr %a
+ store <vscale x 10 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x half>, ptr %a
+ store <vscale x 11 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x half>, ptr %a
+ store <vscale x 12 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x half>, ptr %a
+ store <vscale x 13 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x half>, ptr %a
+ store <vscale x 14 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x half>, ptr %a
+ store <vscale x 15 x half> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16f16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x half>, ptr %a
+ store <vscale x 16 x half> %c, ptr %b
+ ret void
+}
+
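+; Loads and stores of <vscale x N x float>, N = 1..8.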
+define void @sve_load_store_nxv1f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x float>, ptr %a
+ store <vscale x 1 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x float>, ptr %a
+ store <vscale x 2 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x float>, ptr %a
+ store <vscale x 3 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x float>, ptr %a
+ store <vscale x 4 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x float>, ptr %a
+ store <vscale x 5 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1w { z1.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x float>, ptr %a
+ store <vscale x 6 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x float>, ptr %a
+ store <vscale x 7 x float> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8f32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x float>, ptr %a
+ store <vscale x 8 x float> %c, ptr %b
+ ret void
+}
+
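+; Loads and stores of <vscale x N x double>, N = 1..4.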
+define void @sve_load_store_nxv1f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x double>, ptr %a
+ store <vscale x 1 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x double>, ptr %a
+ store <vscale x 2 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1d { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x double>, ptr %a
+ store <vscale x 3 x double> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4f64(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x double>, ptr %a
+ store <vscale x 4 x double> %c, ptr %b
+ ret void
+}
+
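+; Loads and stores of <vscale x N x bfloat>, N = 1..16.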
+define void @sve_load_store_nxv1bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv1bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x bfloat>, ptr %a
+ store <vscale x 1 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv2bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.d }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x bfloat>, ptr %a
+ store <vscale x 2 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv3bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv3bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x bfloat>, ptr %a
+ store <vscale x 3 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv4bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.s }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x bfloat>, ptr %a
+ store <vscale x 4 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv5bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv5bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x bfloat>, ptr %a
+ store <vscale x 5 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv6bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv6bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x bfloat>, ptr %a
+ store <vscale x 6 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv7bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv7bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x bfloat>, ptr %a
+ store <vscale x 7 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv8bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x bfloat>, ptr %a
+ store <vscale x 8 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv9bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv9bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x bfloat>, ptr %a
+ store <vscale x 9 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv10bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv10bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.d }, p0/z, [x0, #4, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.d }, p0, [x1, #4, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x bfloat>, ptr %a
+ store <vscale x 10 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv11bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv11bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x bfloat>, ptr %a
+ store <vscale x 11 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv12bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv12bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ldr z0, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z0, [x1]
+; CHECK-NEXT: st1h { z1.s }, p0, [x1, #2, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x bfloat>, ptr %a
+ store <vscale x 12 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv13bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv13bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x bfloat>, ptr %a
+ store <vscale x 13 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv14bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv14bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ldr z2, [x0]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, #6, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p1/z, [x0, #2, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s
+; CHECK-NEXT: st1h { z1.s }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h
+; CHECK-NEXT: uunpkhi z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [x1, #6, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x bfloat>, ptr %a
+ store <vscale x 14 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv15bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv15bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [x1, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x bfloat>, ptr %a
+ store <vscale x 15 x bfloat> %c, ptr %b
+ ret void
+}
+
+define void @sve_load_store_nxv16bf16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_load_store_nxv16bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-NEXT: ldr z1, [x0]
+; CHECK-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-NEXT: str z1, [x1]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x bfloat>, ptr %a
+ store <vscale x 16 x bfloat> %c, ptr %b
+ ret void
+}
+
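+; Sign-extending loads from <vscale x N x i8> and <vscale x N x i16>.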
+define <vscale x 1 x i16> @sve_sextload_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ %c.sext = sext <vscale x 1 x i8> %c to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %c.sext
+}
+
+define <vscale x 2 x i16> @sve_sextload_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ %c.sext = sext <vscale x 2 x i8> %c to <vscale x 2 x i16>
+ ret <vscale x 2 x i16> %c.sext
+}
+
+define <vscale x 3 x i16> @sve_sextload_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ %c.sext = sext <vscale x 3 x i8> %c to <vscale x 3 x i16>
+ ret <vscale x 3 x i16> %c.sext
+}
+
+define <vscale x 4 x i16> @sve_sextload_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ %c.sext = sext <vscale x 4 x i8> %c to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %c.sext
+}
+
+define <vscale x 5 x i16> @sve_sextload_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ %c.sext = sext <vscale x 5 x i8> %c to <vscale x 5 x i16>
+ ret <vscale x 5 x i16> %c.sext
+}
+
+define <vscale x 6 x i16> @sve_sextload_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ %c.sext = sext <vscale x 6 x i8> %c to <vscale x 6 x i16>
+ ret <vscale x 6 x i16> %c.sext
+}
+
+define <vscale x 7 x i16> @sve_sextload_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ %c.sext = sext <vscale x 7 x i8> %c to <vscale x 7 x i16>
+ ret <vscale x 7 x i16> %c.sext
+}
+
+define <vscale x 8 x i16> @sve_sextload_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ %c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %c.sext
+}
+
+define <vscale x 9 x i16> @sve_sextload_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ %c.sext = sext <vscale x 9 x i8> %c to <vscale x 9 x i16>
+ ret <vscale x 9 x i16> %c.sext
+}
+
+define <vscale x 10 x i16> @sve_sextload_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #5
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ %c.sext = sext <vscale x 10 x i8> %c to <vscale x 10 x i16>
+ ret <vscale x 10 x i16> %c.sext
+}
+
+define <vscale x 11 x i16> @sve_sextload_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ %c.sext = sext <vscale x 11 x i8> %c to <vscale x 11 x i16>
+ ret <vscale x 11 x i16> %c.sext
+}
+
+define <vscale x 12 x i16> @sve_sextload_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntw x8, all, mul #3
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ %c.sext = sext <vscale x 12 x i8> %c to <vscale x 12 x i16>
+ ret <vscale x 12 x i16> %c.sext
+}
+
+define <vscale x 13 x i16> @sve_sextload_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ %c.sext = sext <vscale x 13 x i8> %c to <vscale x 13 x i16>
+ ret <vscale x 13 x i16> %c.sext
+}
+
+define <vscale x 14 x i16> @sve_sextload_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #7
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1sb { z2.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z2, [sp]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ %c.sext = sext <vscale x 14 x i8> %c to <vscale x 14 x i16>
+ ret <vscale x 14 x i16> %c.sext
+}
+
+define <vscale x 15 x i16> @sve_sextload_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sb { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ %c.sext = sext <vscale x 15 x i8> %c to <vscale x 15 x i16>
+ ret <vscale x 15 x i16> %c.sext
+}
+
+define <vscale x 16 x i16> @sve_sextload_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ld1sb { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ %c.sext = sext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+ ret <vscale x 16 x i16> %c.sext
+}
+
+define <vscale x 1 x i32> @sve_sextload_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ %c.sext = sext <vscale x 1 x i16> %c to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %c.sext
+}
+
+define <vscale x 2 x i32> @sve_sextload_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sh { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ %c.sext = sext <vscale x 2 x i16> %c to <vscale x 2 x i32>
+ ret <vscale x 2 x i32> %c.sext
+}
+
+define <vscale x 3 x i32> @sve_sextload_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ %c.sext = sext <vscale x 3 x i16> %c to <vscale x 3 x i32>
+ ret <vscale x 3 x i32> %c.sext
+}
+
+define <vscale x 4 x i32> @sve_sextload_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ %c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %c.sext
+}
+
+define <vscale x 5 x i32> @sve_sextload_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ %c.sext = sext <vscale x 5 x i16> %c to <vscale x 5 x i32>
+ ret <vscale x 5 x i32> %c.sext
+}
+
+define <vscale x 6 x i32> @sve_sextload_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ %c.sext = sext <vscale x 6 x i16> %c to <vscale x 6 x i32>
+ ret <vscale x 6 x i32> %c.sext
+}
+
+define <vscale x 7 x i32> @sve_sextload_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sh { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ %c.sext = sext <vscale x 7 x i16> %c to <vscale x 7 x i32>
+ ret <vscale x 7 x i32> %c.sext
+}
+
+define <vscale x 8 x i32> @sve_sextload_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1sh { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1sh { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ %c.sext = sext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %c.sext
+}
+
+define <vscale x 1 x i64> @sve_sextload_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ %c.sext = sext <vscale x 1 x i32> %c to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %c.sext
+}
+
+define <vscale x 2 x i64> @sve_sextload_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ %c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %c.sext
+}
+
+define <vscale x 3 x i64> @sve_sextload_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1sw { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ %c.sext = sext <vscale x 3 x i32> %c to <vscale x 3 x i64>
+ ret <vscale x 3 x i64> %c.sext
+}
+
+define <vscale x 4 x i64> @sve_sextload_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_sextload_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1sw { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1sw { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ %c.sext = sext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %c.sext
+}
+
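+; The zero-extending load tests below mirror the sign-extending tests above;
+; the unsigned ld1b/ld1h/ld1w forms replace the signed ld1sb/ld1sh/ld1sw.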
+define <vscale x 1 x i16> @sve_zextload_nxv1i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i8>, ptr %a
+ %c.zext = zext <vscale x 1 x i8> %c to <vscale x 1 x i16>
+ ret <vscale x 1 x i16> %c.zext
+}
+
+define <vscale x 2 x i16> @sve_zextload_nxv2i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1b { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i8>, ptr %a
+ %c.zext = zext <vscale x 2 x i8> %c to <vscale x 2 x i16>
+ ret <vscale x 2 x i16> %c.zext
+}
+
+define <vscale x 3 x i16> @sve_zextload_nxv3i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i8>, ptr %a
+ %c.zext = zext <vscale x 3 x i8> %c to <vscale x 3 x i16>
+ ret <vscale x 3 x i16> %c.zext
+}
+
+define <vscale x 4 x i16> @sve_zextload_nxv4i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i8>, ptr %a
+ %c.zext = zext <vscale x 4 x i8> %c to <vscale x 4 x i16>
+ ret <vscale x 4 x i16> %c.zext
+}
+
+define <vscale x 5 x i16> @sve_zextload_nxv5i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv5i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i8>, ptr %a
+ %c.zext = zext <vscale x 5 x i8> %c to <vscale x 5 x i16>
+ ret <vscale x 5 x i16> %c.zext
+}
+
+define <vscale x 6 x i16> @sve_zextload_nxv6i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv6i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i8>, ptr %a
+ %c.zext = zext <vscale x 6 x i8> %c to <vscale x 6 x i16>
+ ret <vscale x 6 x i16> %c.zext
+}
+
+define <vscale x 7 x i16> @sve_zextload_nxv7i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv7i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i8>, ptr %a
+ %c.zext = zext <vscale x 7 x i8> %c to <vscale x 7 x i16>
+ ret <vscale x 7 x i16> %c.zext
+}
+
+define <vscale x 8 x i16> @sve_zextload_nxv8i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i8>, ptr %a
+ %c.zext = zext <vscale x 8 x i8> %c to <vscale x 8 x i16>
+ ret <vscale x 8 x i16> %c.zext
+}
+
+define <vscale x 9 x i16> @sve_zextload_nxv9i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv9i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #9 // =0x9
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 9 x i8>, ptr %a
+ %c.zext = zext <vscale x 9 x i8> %c to <vscale x 9 x i16>
+ ret <vscale x 9 x i16> %c.zext
+}
+
+define <vscale x 10 x i16> @sve_zextload_nxv10i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv10i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #5
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: st1h { z0.d }, p0, [sp, #4, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 10 x i8>, ptr %a
+ %c.zext = zext <vscale x 10 x i8> %c to <vscale x 10 x i16>
+ ret <vscale x 10 x i16> %c.zext
+}
+
+define <vscale x 11 x i16> @sve_zextload_nxv11i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv11i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #11 // =0xb
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 11 x i8>, ptr %a
+ %c.zext = zext <vscale x 11 x i8> %c to <vscale x 11 x i16>
+ ret <vscale x 11 x i16> %c.zext
+}
+
+define <vscale x 12 x i16> @sve_zextload_nxv12i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv12i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntw x8, all, mul #3
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 12 x i8>, ptr %a
+ %c.zext = zext <vscale x 12 x i8> %c to <vscale x 12 x i16>
+ ret <vscale x 12 x i16> %c.zext
+}
+
+define <vscale x 13 x i16> @sve_zextload_nxv13i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv13i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #13 // =0xd
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 13 x i8>, ptr %a
+ %c.zext = zext <vscale x 13 x i8> %c to <vscale x 13 x i16>
+ ret <vscale x 13 x i16> %c.zext
+}
+
+define <vscale x 14 x i16> @sve_zextload_nxv14i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv14i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #7
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.s
+; CHECK-NEXT: ld1b { z2.h }, p0/z, [x0]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: uunpkhi z1.s, z0.h
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: str z2, [sp]
+; CHECK-NEXT: uunpklo z1.d, z1.s
+; CHECK-NEXT: st1h { z0.s }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: st1h { z1.d }, p0, [sp, #6, mul vl]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 14 x i8>, ptr %a
+ %c.zext = zext <vscale x 14 x i8> %c to <vscale x 14 x i16>
+ ret <vscale x 14 x i16> %c.zext
+}
+
+define <vscale x 15 x i16> @sve_zextload_nxv15i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv15i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #15 // =0xf
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.b, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1b { z0.h }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0]
+; CHECK-NEXT: st1h { z0.h }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1h { z1.h }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 15 x i8>, ptr %a
+ %c.zext = zext <vscale x 15 x i8> %c to <vscale x 15 x i16>
+ ret <vscale x 15 x i16> %c.zext
+}
+
+define <vscale x 16 x i16> @sve_zextload_nxv16i8(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ld1b { z1.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 16 x i8>, ptr %a
+ %c.zext = zext <vscale x 16 x i8> %c to <vscale x 16 x i16>
+ ret <vscale x 16 x i16> %c.zext
+}
+
+define <vscale x 1 x i32> @sve_zextload_nxv1i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i16>, ptr %a
+ %c.zext = zext <vscale x 1 x i16> %c to <vscale x 1 x i32>
+ ret <vscale x 1 x i32> %c.zext
+}
+
+define <vscale x 2 x i32> @sve_zextload_nxv2i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i16>, ptr %a
+ %c.zext = zext <vscale x 2 x i16> %c to <vscale x 2 x i32>
+ ret <vscale x 2 x i32> %c.zext
+}
+
+define <vscale x 3 x i32> @sve_zextload_nxv3i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i16>, ptr %a
+ %c.zext = zext <vscale x 3 x i16> %c to <vscale x 3 x i32>
+ ret <vscale x 3 x i32> %c.zext
+}
+
+define <vscale x 4 x i32> @sve_zextload_nxv4i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i16>, ptr %a
+ %c.zext = zext <vscale x 4 x i16> %c to <vscale x 4 x i32>
+ ret <vscale x 4 x i32> %c.zext
+}
+
+define <vscale x 5 x i32> @sve_zextload_nxv5i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv5i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #5 // =0x5
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 5 x i16>, ptr %a
+ %c.zext = zext <vscale x 5 x i16> %c to <vscale x 5 x i32>
+ ret <vscale x 5 x i32> %c.zext
+}
+
+define <vscale x 6 x i32> @sve_zextload_nxv6i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv6i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: cntd x8, all, mul #3
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ptrue p1.d
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: uunpklo z0.d, z0.s
+; CHECK-NEXT: str z1, [sp]
+; CHECK-NEXT: st1w { z0.d }, p1, [sp, #2, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 6 x i16>, ptr %a
+ %c.zext = zext <vscale x 6 x i16> %c to <vscale x 6 x i32>
+ ret <vscale x 6 x i32> %c.zext
+}
+
+define <vscale x 7 x i32> @sve_zextload_nxv7i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv7i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #7 // =0x7
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.h, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1h { z0.s }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT: st1w { z0.s }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1w { z1.s }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 7 x i16>, ptr %a
+ %c.zext = zext <vscale x 7 x i16> %c to <vscale x 7 x i32>
+ ret <vscale x 7 x i32> %c.zext
+}
+
+define <vscale x 8 x i32> @sve_zextload_nxv8i16(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 8 x i16>, ptr %a
+ %c.zext = zext <vscale x 8 x i16> %c to <vscale x 8 x i32>
+ ret <vscale x 8 x i32> %c.zext
+}
+
+define <vscale x 1 x i64> @sve_zextload_nxv1i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv1i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: whilelo p0.d, xzr, x8
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 1 x i32>, ptr %a
+ %c.zext = zext <vscale x 1 x i32> %c to <vscale x 1 x i64>
+ ret <vscale x 1 x i64> %c.zext
+}
+
+define <vscale x 2 x i64> @sve_zextload_nxv2i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv2i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+ %c = load <vscale x 2 x i32>, ptr %a
+ %c.zext = zext <vscale x 2 x i32> %c to <vscale x 2 x i64>
+ ret <vscale x 2 x i64> %c.zext
+}
+
+define <vscale x 3 x i64> @sve_zextload_nxv3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv3i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: addvl sp, sp, #-2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x40, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: mov w9, #3 // =0x3
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: mul x8, x8, x9
+; CHECK-NEXT: whilelo p0.s, xzr, x8
+; CHECK-NEXT: punpkhi p1.h, p0.b
+; CHECK-NEXT: punpklo p0.h, p0.b
+; CHECK-NEXT: ld1w { z0.d }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT: st1d { z0.d }, p1, [sp, #1, mul vl]
+; CHECK-NEXT: st1d { z1.d }, p0, [sp]
+; CHECK-NEXT: ldr z1, [sp, #1, mul vl]
+; CHECK-NEXT: ldr z0, [sp]
+; CHECK-NEXT: addvl sp, sp, #2
+; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %c = load <vscale x 3 x i32>, ptr %a
+ %c.zext = zext <vscale x 3 x i32> %c to <vscale x 3 x i64>
+ ret <vscale x 3 x i64> %c.zext
+}
+
+define <vscale x 4 x i64> @sve_zextload_nxv4i32(ptr %a, ptr %b) {
+; CHECK-LABEL: sve_zextload_nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z1.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+ %c = load <vscale x 4 x i32>, ptr %a
+ %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
+ ret <vscale x 4 x i64> %c.zext
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
index 2cbb29e..d8de12c 100644
--- a/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stack-frame-layout.ll
@@ -672,5 +672,3 @@ entry:
ret i32 %x
}
declare void @other()
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-FRAMELAYOUT: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 4a04934..6946cc2 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=aarch64 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
-; RUN: llc < %s -mtriple=aarch64 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+; RUN: llc < %s -mtriple=aarch64 -O3 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -O3 -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare void @t()
@@ -581,3 +581,323 @@ end:
ret void
}
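+; The four tests below fold a phi of known constants into a branch condition;
+; both SelectionDAG and GlobalISel must handle tbz/tbnz/cbz/cbnz when the
+; tested value reduces to the zero register (wzr).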
+define ptr @tbnz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: tbnz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB20_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: tbnz wzr, #0, .LBB20_3
+; CHECK-SD-NEXT: b .LBB20_4
+; CHECK-SD-NEXT: .LBB20_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: tbz w8, #0, .LBB20_4
+; CHECK-SD-NEXT: .LBB20_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB20_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: tbnz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: tbz w0, #0, .LBB20_3
+; CHECK-GI-NEXT: // %bb.1: // %if.end10
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB20_4
+; CHECK-GI-NEXT: .LBB20_2: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB20_3: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: tbz w8, #0, .LBB20_2
+; CHECK-GI-NEXT: .LBB20_4: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i1 [ true, %opnfil.exit.thread ], [ false, %entry ]
+ br i1 %cmp5, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @tbz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: tbz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB21_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: tbnz w8, #0, .LBB21_3
+; CHECK-SD-NEXT: b .LBB21_4
+; CHECK-SD-NEXT: .LBB21_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: tbz wzr, #0, .LBB21_4
+; CHECK-SD-NEXT: .LBB21_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB21_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: tbz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: tbz w0, #0, .LBB21_3
+; CHECK-GI-NEXT: // %bb.1: // %if.end10
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB21_4
+; CHECK-GI-NEXT: .LBB21_2: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB21_3: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #0 // =0x0
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: tbz w8, #0, .LBB21_2
+; CHECK-GI-NEXT: .LBB21_4: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i1 [ false, %opnfil.exit.thread ], [ true, %entry ]
+ br i1 %cmp5, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @cbnz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: cbnz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB22_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: cbnz wzr, .LBB22_3
+; CHECK-SD-NEXT: b .LBB22_4
+; CHECK-SD-NEXT: .LBB22_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: mov w8, #10 // =0xa
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: cbz w8, .LBB22_4
+; CHECK-SD-NEXT: .LBB22_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB22_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cbnz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: tbnz w0, #0, .LBB22_2
+; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, #10 // =0xa
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB22_2: // %if.end10
+; CHECK-GI-NEXT: cbz w8, .LBB22_4
+; CHECK-GI-NEXT: // %bb.3: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB22_4: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i32 [ 10, %opnfil.exit.thread ], [ 0, %entry ]
+ %cmp5b = icmp ne i32 %cmp5, 0
+ br i1 %cmp5b, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5b, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
+define ptr @cbz_wzr(i1 %cmp1.not.i, ptr %locflg) {
+; CHECK-SD-LABEL: cbz_wzr:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: tbz w0, #0, .LBB23_2
+; CHECK-SD-NEXT: // %bb.1:
+; CHECK-SD-NEXT: mov w8, #10 // =0xa
+; CHECK-SD-NEXT: cbnz w8, .LBB23_3
+; CHECK-SD-NEXT: b .LBB23_4
+; CHECK-SD-NEXT: .LBB23_2: // %opnfil.exit.thread
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: cbz wzr, .LBB23_4
+; CHECK-SD-NEXT: .LBB23_3: // %if.else25
+; CHECK-SD-NEXT: str wzr, [x1]
+; CHECK-SD-NEXT: .LBB23_4: // %common.ret
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cbz_wzr:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #10 // =0xa
+; CHECK-GI-NEXT: tbnz w0, #0, .LBB23_2
+; CHECK-GI-NEXT: // %bb.1: // %opnfil.exit.thread
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB23_2: // %if.end10
+; CHECK-GI-NEXT: cbz w8, .LBB23_4
+; CHECK-GI-NEXT: // %bb.3: // %if.else25
+; CHECK-GI-NEXT: str wzr, [x1]
+; CHECK-GI-NEXT: .LBB23_4: // %common.ret
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: ret
+entry:
+ br i1 %cmp1.not.i, label %if.end10, label %opnfil.exit.thread
+
+opnfil.exit.thread: ; preds = %entry
+ store i32 0, ptr %locflg, align 4
+ br label %if.end10
+
+if.end10: ; preds = %opnfil.exit.thread, %entry
+ %cmp5 = phi i32 [ 0, %opnfil.exit.thread ], [ 10, %entry ]
+ %cmp5b = icmp ne i32 %cmp5, 0
+ br i1 %cmp5b, label %if.else25, label %if.then12
+
+if.then12: ; preds = %if.end10
+ %call20 = load i32, ptr null, align 4
+ br label %if.end26
+
+if.else25: ; preds = %if.end10
+ store i32 0, ptr %locflg, align 4
+ br label %if.end26
+
+if.end26: ; preds = %if.else25, %if.then12
+ br i1 %cmp5b, label %common.ret, label %if.then28
+
+common.ret: ; preds = %if.then28, %if.end26
+ %common.ret.op = phi ptr [ null, %if.then28 ], [ null, %if.end26 ]
+ ret ptr %common.ret.op
+
+if.then28: ; preds = %if.end26
+ %0 = load ptr, ptr null, align 8
+ br label %common.ret
+}
+
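+; Here every branch condition folds to a constant and all paths return false;
+; check that folding the known conditions does not corrupt the CFG lowering.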
+define i1 @avifSequenceHeaderParse() {
+; CHECK-SD-LABEL: avifSequenceHeaderParse:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: cbz w8, .LBB24_2
+; CHECK-SD-NEXT: .LBB24_1: // %bb6
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB24_2: // %bb1
+; CHECK-SD-NEXT: cbz w8, .LBB24_4
+; CHECK-SD-NEXT: // %bb.3:
+; CHECK-SD-NEXT: tbz xzr, #63, .LBB24_1
+; CHECK-SD-NEXT: b .LBB24_5
+; CHECK-SD-NEXT: .LBB24_4: // %bb2
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: tbz x8, #63, .LBB24_1
+; CHECK-SD-NEXT: .LBB24_5: // %bb4
+; CHECK-SD-NEXT: mov w8, #1 // =0x1
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: avifSequenceHeaderParse:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: ret
+entry:
+ %a = icmp slt i64 0, 0
+ br i1 %a, label %bb1, label %bb6
+
+bb1: ; preds = %entry
+ %b = icmp eq i32 1, 0
+ br i1 %b, label %bb2, label %bb3
+
+bb2: ; preds = %bb1
+ %c = load i8, ptr null, align 1
+ %d = zext i8 1 to i64
+ %e = shl i64 %d, 0
+ br label %bb3
+
+bb3: ; preds = %bb2, %bb1
+ %f = phi i64 [ %e, %bb2 ], [ 0, %bb1 ]
+ %g = icmp slt i64 %f, 0
+ br i1 %g, label %bb4, label %bb6
+
+bb4: ; preds = %bb3
+ %h = icmp eq i32 1, 0
+ br i1 %h, label %bb5, label %bb7
+
+bb5: ; preds = %bb4
+ %i = load i8, ptr null, align 1
+ %j = shl i64 0, 0
+ br label %bb7
+
+bb6: ; preds = %bb7, %bb3, %entry
+ %k = phi i1 [ false, %bb7 ], [ false, %bb3 ], [ false, %entry ]
+ ret i1 %k
+
+bb7: ; preds = %bb5, %bb4
+ %l = phi ptr [ inttoptr (i64 1 to ptr), %bb5 ], [ null, %bb4 ]
+ %m = phi i64 [ %j, %bb5 ], [ 0, %bb4 ]
+ %n = icmp ult ptr %l, null
+ br label %bb6
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
index 666523c..ff618c0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmax.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
index 3515028..007417c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/atomicrmw_fmin.ll
@@ -1812,26 +1812,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_mov_b32_e32 v6, s16
-; GFX12-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], null offen
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
+; GFX12-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], null offen
; GFX12-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1854,27 +1854,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_mov_b32_e32 v6, s16
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v6, s[0:3], 0 offen
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX11-NEXT: buffer_load_b64 v[4:5], v8, s[0:3], 0 offen
; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1906,28 +1906,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v6, s20
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB14_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1937,28 +1935,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v6, s20
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v6, s[16:19], 0 offen
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v8, s[16:19], 0 offen
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: .LBB14_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB14_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
index ba5a8e9..9e412b6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udivrem.ll
@@ -209,48 +209,48 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v2
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s10, v3, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v6, s9
-; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s8, v0
; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s11, v4, v[1:2]
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s8, v0
-; GFX8-NEXT: v_subb_u32_e64 v6, s[0:1], v6, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s9
+; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_subb_u32_e64 v7, s[0:1], v2, v1, vcc
; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s9, v1
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v6
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v6
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v7
; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1]
-; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s10, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s10, v6
; GFX8-NEXT: v_subbrev_u32_e64 v8, s[0:1], 0, v0, vcc
; GFX8-NEXT: v_add_u32_e64 v9, s[0:1], 1, v4
; GFX8-NEXT: v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s11, v8
; GFX8-NEXT: v_cndmask_b32_e64 v11, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v7
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s10, v2
; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s11, v8
-; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v7
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s10, v2
; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v12, s[0:1]
; GFX8-NEXT: v_add_u32_e64 v12, s[0:1], 1, v9
; GFX8-NEXT: v_subbrev_u32_e32 v14, vcc, 0, v0, vcc
; GFX8-NEXT: v_addc_u32_e64 v13, s[0:1], 0, v10, s[0:1]
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v13, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v0, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v14, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v4, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v9, s[0:1]
; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v14, vcc
; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[0:1]
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; GFX8-NEXT: s_endpgm
@@ -299,7 +299,6 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_add_co_u32_e32 v3, vcc, v3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v4, v1, vcc
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v3, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, s19
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v4, v[1:2]
; GFX9-NEXT: v_mul_hi_u32 v6, v3, v0
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s3, v3, v[1:2]
@@ -346,30 +345,30 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
; GFX9-NEXT: v_add3_u32 v3, v3, v2, v6
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s18, v3, v[1:2]
-; GFX9-NEXT: v_mov_b32_e32 v6, s17
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_sub_co_u32_e32 v7, vcc, s16, v0
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s19, v5, v[1:2]
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s16, v0
-; GFX9-NEXT: v_subb_co_u32_e64 v6, s[0:1], v6, v1, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v6
+; GFX9-NEXT: v_mov_b32_e32 v2, s17
+; GFX9-NEXT: v_mov_b32_e32 v4, s19
+; GFX9-NEXT: v_subb_co_u32_e64 v8, s[0:1], v2, v1, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v8
; GFX9-NEXT: v_sub_u32_e32 v0, s17, v1
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v6
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v8, s[0:1]
-; GFX9-NEXT: v_subrev_co_u32_e32 v8, vcc, s18, v2
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
+; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s18, v7
; GFX9-NEXT: v_subbrev_co_u32_e64 v9, s[0:1], 0, v0, vcc
; GFX9-NEXT: v_add_co_u32_e64 v10, s[0:1], 1, v5
; GFX9-NEXT: v_addc_co_u32_e64 v11, s[0:1], 0, v3, s[0:1]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s19, v9
; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v7, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s18, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v4, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s19, v9
-; GFX9-NEXT: v_subrev_co_u32_e32 v7, vcc, s18, v8
+; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s18, v2
; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[0:1]
; GFX9-NEXT: v_add_co_u32_e64 v13, s[0:1], 1, v10
; GFX9-NEXT: v_subbrev_co_u32_e32 v15, vcc, 0, v0, vcc
@@ -378,14 +377,15 @@ define amdgpu_kernel void @udivrem_i64(ptr addrspace(1) %out0, ptr addrspace(1)
; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v13, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v14, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v10, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v8, v7, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[12:13]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[14:15]
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v3, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v6, v[0:1], s[12:13]
+; GFX9-NEXT: global_store_dwordx2 v6, v[2:3], s[14:15]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: udivrem_i64:
@@ -1070,6 +1070,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX8-NEXT: v_mul_lo_u32 v3, s8, v1
; GFX8-NEXT: v_mul_hi_u32 v4, s8, v0
; GFX8-NEXT: v_mul_hi_u32 v0, s9, v0
+; GFX8-NEXT: v_mov_b32_e32 v5, s13
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
@@ -1082,184 +1083,183 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v4, v3
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v0, v2
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v0, v2
; GFX8-NEXT: v_mul_hi_u32 v4, s9, v1
-; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v6, 0
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s12, v7, 0
; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v3, v2
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v4, v2
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v7, v[1:2]
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v2
+; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s12, v8, v[1:2]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s13, v7, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v3, s9
-; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s8, v0
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s13, v6, v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v4, s13
-; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v1, vcc
-; GFX8-NEXT: v_sub_u32_e64 v1, s[0:1], s9, v1
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, s8, v0
+; GFX8-NEXT: v_subb_u32_e64 v0, s[0:1], v3, v2, vcc
+; GFX8-NEXT: v_sub_u32_e64 v2, s[0:1], s9, v2
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v8
; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v1
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v9, v2, v3, s[0:1]
-; GFX8-NEXT: v_cvt_f32_u32_e32 v2, s15
-; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s14
-; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v4, vcc
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x4f800000, v2
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_rcp_iflag_f32_e32 v1, v1
-; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v8
-; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v5, vcc
-; GFX8-NEXT: v_mul_f32_e32 v1, 0x5f7ffffc, v1
-; GFX8-NEXT: v_mul_f32_e32 v2, 0x2f800000, v1
-; GFX8-NEXT: v_trunc_f32_e32 v3, v2
-; GFX8-NEXT: v_mul_f32_e32 v2, 0xcf800000, v3
-; GFX8-NEXT: v_add_f32_e32 v1, v2, v1
-; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v1
-; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v6
-; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v7, s[0:1]
-; GFX8-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s2, v12, 0
-; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v3
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v3, v4, s[0:1]
+; GFX8-NEXT: v_cvt_f32_u32_e32 v3, s15
+; GFX8-NEXT: v_cvt_f32_u32_e32 v4, s14
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v5, vcc
+; GFX8-NEXT: v_mul_f32_e32 v2, 0x4f800000, v3
+; GFX8-NEXT: v_add_f32_e32 v2, v2, v4
+; GFX8-NEXT: v_rcp_iflag_f32_e32 v2, v2
+; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s12, v1
+; GFX8-NEXT: v_subbrev_u32_e64 v11, s[0:1], 0, v6, vcc
+; GFX8-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
+; GFX8-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
+; GFX8-NEXT: v_trunc_f32_e32 v4, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, 0xcf800000, v4
+; GFX8-NEXT: v_add_f32_e32 v2, v3, v2
+; GFX8-NEXT: v_cvt_u32_f32_e32 v12, v2
+; GFX8-NEXT: v_add_u32_e64 v13, s[0:1], 1, v7
+; GFX8-NEXT: v_addc_u32_e64 v14, s[0:1], 0, v8, s[0:1]
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v12, 0
+; GFX8-NEXT: v_cvt_u32_f32_e32 v15, v4
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s13, v11
; GFX8-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1]
-; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3]
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v6, v5, vcc
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s12, v10
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1]
-; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s3, v12, v[2:3]
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s3, v12, v[3:4]
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s13, v11
; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1]
-; GFX8-NEXT: v_mul_lo_u32 v3, v15, v1
-; GFX8-NEXT: v_mul_lo_u32 v17, v12, v2
-; GFX8-NEXT: v_mul_hi_u32 v5, v12, v1
-; GFX8-NEXT: v_mul_hi_u32 v1, v15, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17
+; GFX8-NEXT: v_mul_lo_u32 v4, v15, v2
+; GFX8-NEXT: v_mul_lo_u32 v17, v12, v3
+; GFX8-NEXT: v_mul_hi_u32 v6, v12, v2
+; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
-; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v5, v15, v2
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v17, v3
-; GFX8-NEXT: v_mul_hi_u32 v17, v12, v2
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v5, v1
-; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v17
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v6
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v6, v15, v3
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v17, v4
+; GFX8-NEXT: v_mul_hi_u32 v17, v12, v3
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v6, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v17
; GFX8-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17
; GFX8-NEXT: v_add_u32_e32 v17, vcc, 1, v13
; GFX8-NEXT: v_addc_u32_e32 v18, vcc, 0, v14, vcc
; GFX8-NEXT: v_subrev_u32_e32 v19, vcc, s12, v10
-; GFX8-NEXT: v_mul_hi_u32 v2, v15, v2
-; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v4, vcc
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
-; GFX8-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
-; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v1
-; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v12, 0
-; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v2, vcc
+; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3
+; GFX8-NEXT: v_subbrev_u32_e32 v20, vcc, 0, v5, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v2
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0
+; GFX8-NEXT: v_addc_u32_e32 v15, vcc, v15, v3, vcc
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v13, v17, vcc
-; GFX8-NEXT: v_mov_b32_e32 v1, v4
-; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v15, v[1:2]
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, v5
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3]
; GFX8-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9
-; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s3, v12, v[4:5]
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v19, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, v15, v3
-; GFX8-NEXT: v_mul_lo_u32 v9, v12, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v5, s[0:1]
-; GFX8-NEXT: v_mul_hi_u32 v8, v12, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v11, v20, vcc
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9
-; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v8, v15, v4
-; GFX8-NEXT: v_mul_hi_u32 v3, v15, v3
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v9, v7
+; GFX8-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1]
+; GFX8-NEXT: v_mul_lo_u32 v7, v15, v4
+; GFX8-NEXT: v_mul_lo_u32 v8, v12, v5
; GFX8-NEXT: v_mul_hi_u32 v9, v12, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v8, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v9
-; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v9
-; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v7
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v9
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v9, v15, v5
+; GFX8-NEXT: v_mul_hi_u32 v4, v15, v4
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7
+; GFX8-NEXT: v_mul_hi_u32 v8, v12, v5
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v9, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v9, v8
+; GFX8-NEXT: v_mul_hi_u32 v5, v15, v5
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v7
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v12, v3
-; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v15, v4, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, s11, v3
-; GFX8-NEXT: v_mul_lo_u32 v8, s10, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, v6, s[0:1]
-; GFX8-NEXT: v_mul_hi_u32 v0, s10, v3
-; GFX8-NEXT: v_mul_hi_u32 v3, s11, v3
+; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v8, v7
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v7
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v12, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v15, v5, vcc
+; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4
+; GFX8-NEXT: v_mul_lo_u32 v8, s10, v5
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1]
+; GFX8-NEXT: v_mul_hi_u32 v1, s10, v4
+; GFX8-NEXT: v_mul_hi_u32 v4, s11, v4
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX8-NEXT: v_mul_lo_u32 v7, s11, v4
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v8, v0
-; GFX8-NEXT: v_mul_hi_u32 v8, s10, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v7, v3
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_mul_lo_u32 v7, s11, v5
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v8, v1
+; GFX8-NEXT: v_mul_hi_u32 v8, s10, v5
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v7, v4
; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v8
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v8
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, v3, v0
-; GFX8-NEXT: v_mul_hi_u32 v8, s11, v4
-; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s14, v9, 0
-; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v0
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v8, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
-; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s14, v10, v[0:1]
-; GFX8-NEXT: v_mov_b32_e32 v4, s11
-; GFX8-NEXT: v_mov_b32_e32 v0, s15
-; GFX8-NEXT: v_mad_u64_u32 v[7:8], s[0:1], s15, v9, v[7:8]
-; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v3
-; GFX8-NEXT: v_subb_u32_e64 v11, s[0:1], v4, v7, vcc
-; GFX8-NEXT: v_sub_u32_e64 v3, s[0:1], s11, v7
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v11
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v4, v1
+; GFX8-NEXT: v_mul_hi_u32 v8, s11, v5
+; GFX8-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s14, v11, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v7, v1
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v1, v5
+; GFX8-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s14, v12, v[1:2]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v5, s15
+; GFX8-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s15, v11, v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s10, v4
+; GFX8-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v0, vcc
+; GFX8-NEXT: v_sub_u32_e64 v0, s[0:1], s11, v0
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v1
; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v8
-; GFX8-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v11
-; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v7, s[0:1]
-; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s14, v8
-; GFX8-NEXT: v_subbrev_u32_e64 v12, s[0:1], 0, v3, vcc
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v12
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1]
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v1
+; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1]
+; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s14, v8
+; GFX8-NEXT: v_subbrev_u32_e64 v10, s[0:1], 0, v0, vcc
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s15, v10
; GFX8-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v7
+; GFX8-NEXT: v_cmp_le_u32_e64 s[0:1], s14, v9
; GFX8-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v12
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s15, v10
; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
-; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v9
-; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v3, v0, vcc
-; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v10, s[0:1]
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, 1, v14
+; GFX8-NEXT: v_add_u32_e64 v14, s[0:1], 1, v11
+; GFX8-NEXT: v_subb_u32_e32 v0, vcc, v0, v5, vcc
+; GFX8-NEXT: v_addc_u32_e64 v15, s[0:1], 0, v12, s[0:1]
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 1, v14
; GFX8-NEXT: v_addc_u32_e32 v16, vcc, 0, v15, vcc
; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
-; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v7
+; GFX8-NEXT: v_subrev_u32_e64 v13, s[0:1], s14, v9
; GFX8-NEXT: v_subbrev_u32_e64 v0, s[0:1], 0, v0, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v14, v3, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v10, v14, s[0:1]
-; GFX8-NEXT: v_mov_b32_e32 v10, s5
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v13, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc
-; GFX8-NEXT: v_mov_b32_e32 v9, s4
-; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v8, v11, v0, s[0:1]
-; GFX8-NEXT: flat_store_dwordx4 v[9:10], v[1:4]
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v9, v13, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
-; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[5:8]
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[6:9]
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: udivrem_v2i64:
@@ -1355,11 +1355,11 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_add_u32_e32 v3, v4, v3
; GFX9-NEXT: v_add3_u32 v8, v3, v2, v5
; GFX9-NEXT: v_mad_u64_u32 v[1:2], s[0:1], s4, v8, v[1:2]
-; GFX9-NEXT: v_mov_b32_e32 v4, s17
; GFX9-NEXT: v_mov_b32_e32 v5, s5
; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s5, v7, v[1:2]
+; GFX9-NEXT: v_mov_b32_e32 v3, s17
; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s16, v0
-; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v4, v2, vcc
+; GFX9-NEXT: v_subb_co_u32_e64 v0, s[0:1], v3, v2, vcc
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, -1, s[0:1]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v1
@@ -1387,7 +1387,7 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_cvt_u32_f32_e32 v15, v4
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s5, v11
; GFX9-NEXT: v_cndmask_b32_e64 v16, 0, -1, s[0:1]
-; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v6, v5, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v5, vcc
; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], s2, v15, v[3:4]
; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s4, v10
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, -1, s[0:1]
@@ -1396,128 +1396,128 @@ define amdgpu_kernel void @udivrem_v2i64(ptr addrspace(1) %out0, ptr addrspace(1
; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[0:1]
; GFX9-NEXT: v_mul_lo_u32 v4, v15, v2
; GFX9-NEXT: v_mul_lo_u32 v17, v12, v3
-; GFX9-NEXT: v_mul_hi_u32 v6, v12, v2
+; GFX9-NEXT: v_mul_hi_u32 v5, v12, v2
; GFX9-NEXT: v_mul_hi_u32 v2, v15, v2
; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v17
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v6
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v5
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v6, v15, v3
+; GFX9-NEXT: v_mul_lo_u32 v5, v15, v3
; GFX9-NEXT: v_add_u32_e32 v4, v17, v4
; GFX9-NEXT: v_mul_hi_u32 v17, v12, v3
; GFX9-NEXT: v_mul_hi_u32 v3, v15, v3
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v6, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v5, v2
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v17
; GFX9-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc
-; GFX9-NEXT: v_add_u32_e32 v6, v6, v17
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v17
; GFX9-NEXT: v_add_co_u32_e32 v17, vcc, 1, v13
; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, 0, v14, vcc
-; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10
-; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v5, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, v12, v2
-; GFX9-NEXT: v_add3_u32 v3, v6, v4, v3
+; GFX9-NEXT: v_add3_u32 v3, v5, v4, v3
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[0:1], s2, v12, 0
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, v15, v3, vcc
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc
; GFX9-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s2, v15, v[2:3]
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v14, v18, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], s2, v15, v[2:3]
+; GFX9-NEXT: v_subrev_co_u32_e32 v19, vcc, s4, v10
+; GFX9-NEXT: v_subbrev_co_u32_e32 v20, vcc, 0, v6, vcc
+; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[0:1], s3, v12, v[2:3]
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v13, v17, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v9
-; GFX9-NEXT: v_mad_u64_u32 v[5:6], s[2:3], s3, v12, v[5:6]
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v3, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v13, s[0:1]
-; GFX9-NEXT: v_mul_lo_u32 v7, v15, v4
-; GFX9-NEXT: v_mul_lo_u32 v8, v12, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v19, vcc
-; GFX9-NEXT: v_mul_hi_u32 v10, v12, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v11, v20, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v10
-; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v10, v15, v5
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v13, s[0:1]
+; GFX9-NEXT: v_mul_lo_u32 v6, v15, v4
+; GFX9-NEXT: v_mul_lo_u32 v7, v12, v5
+; GFX9-NEXT: v_mul_hi_u32 v9, v12, v4
; GFX9-NEXT: v_mul_hi_u32 v4, v15, v4
-; GFX9-NEXT: v_add_u32_e32 v7, v8, v7
-; GFX9-NEXT: v_mul_hi_u32 v8, v12, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v18, vcc
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[2:3], v6, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v9, v15, v5
+; GFX9-NEXT: v_add_u32_e32 v6, v7, v6
+; GFX9-NEXT: v_mul_hi_u32 v7, v12, v5
; GFX9-NEXT: v_mul_hi_u32 v5, v15, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v10, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
-; GFX9-NEXT: v_add_u32_e32 v8, v10, v8
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v9, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, s[2:3]
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v4, v6
+; GFX9-NEXT: v_add_u32_e32 v7, v9, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[2:3]
+; GFX9-NEXT: v_add3_u32 v5, v7, v6, v5
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], v12, v4
+; GFX9-NEXT: v_addc_co_u32_e64 v5, s[2:3], v15, v5, s[2:3]
+; GFX9-NEXT: v_mul_lo_u32 v6, s19, v4
+; GFX9-NEXT: v_mul_lo_u32 v7, s18, v5
+; GFX9-NEXT: v_mul_hi_u32 v9, s18, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v8, v14, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v19, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v11, v20, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v7
; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; GFX9-NEXT: v_add3_u32 v5, v8, v7, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v12, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v15, v5, vcc
-; GFX9-NEXT: v_mul_lo_u32 v7, s19, v4
-; GFX9-NEXT: v_mul_lo_u32 v8, s18, v5
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v6, s[0:1]
-; GFX9-NEXT: v_mul_hi_u32 v1, s18, v4
+; GFX9-NEXT: v_add_co_u32_e32 v6, vcc, v6, v9
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_mul_lo_u32 v9, s19, v5
; GFX9-NEXT: v_mul_hi_u32 v4, s19, v4
-; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v7, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, v7, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-NEXT: v_mul_lo_u32 v7, s19, v5
-; GFX9-NEXT: v_add_u32_e32 v1, v8, v1
-; GFX9-NEXT: v_mul_hi_u32 v8, s18, v5
-; GFX9-NEXT: v_mul_hi_u32 v12, s19, v5
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v7, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v1
+; GFX9-NEXT: v_add_u32_e32 v6, v7, v6
+; GFX9-NEXT: v_mul_hi_u32 v7, s18, v5
+; GFX9-NEXT: v_mul_hi_u32 v13, s19, v5
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v9, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v11, vcc, v4, v6
; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[2:3], s6, v11, 0
-; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v9, s[0:1]
-; GFX9-NEXT: v_add_u32_e32 v0, v10, v8
-; GFX9-NEXT: v_add3_u32 v8, v0, v1, v12
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
-; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s6, v8, v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v9, s19
+; GFX9-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v1, v8, s[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, v9, v7
+; GFX9-NEXT: v_add3_u32 v12, v1, v12, v13
+; GFX9-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[2:3], s6, v12, v[1:2]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v0, v10, s[0:1]
; GFX9-NEXT: v_mov_b32_e32 v5, s7
-; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[0:1]
-; GFX9-NEXT: v_sub_co_u32_e32 v1, vcc, s18, v4
-; GFX9-NEXT: v_subb_co_u32_e64 v9, s[0:1], v9, v0, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v9
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s7, v11, v[8:9]
+; GFX9-NEXT: v_mov_b32_e32 v1, s19
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, s18, v4
+; GFX9-NEXT: v_subb_co_u32_e64 v1, s[0:1], v1, v0, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v1
; GFX9-NEXT: v_sub_u32_e32 v0, s19, v0
; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v1
-; GFX9-NEXT: v_cndmask_b32_e64 v10, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v9
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v8
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, -1, s[0:1]
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v1
; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v10, s[0:1]
-; GFX9-NEXT: v_subrev_co_u32_e32 v10, vcc, s6, v1
-; GFX9-NEXT: v_subbrev_co_u32_e64 v12, s[0:1], 0, v0, vcc
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v12
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[0:1]
+; GFX9-NEXT: v_subrev_co_u32_e32 v9, vcc, s6, v8
+; GFX9-NEXT: v_subbrev_co_u32_e64 v10, s[0:1], 0, v0, vcc
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s7, v10
; GFX9-NEXT: v_cndmask_b32_e64 v13, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v10
+; GFX9-NEXT: v_cmp_le_u32_e64 s[0:1], s6, v9
; GFX9-NEXT: v_cndmask_b32_e64 v14, 0, -1, s[0:1]
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v12
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s7, v10
; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v14, s[0:1]
; GFX9-NEXT: v_add_co_u32_e64 v14, s[0:1], 1, v11
; GFX9-NEXT: v_subb_co_u32_e32 v0, vcc, v0, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v8, s[0:1]
+; GFX9-NEXT: v_addc_co_u32_e64 v15, s[0:1], 0, v12, s[0:1]
; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, 1, v14
; GFX9-NEXT: v_addc_co_u32_e32 v16, vcc, 0, v15, vcc
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
; GFX9-NEXT: v_cndmask_b32_e32 v5, v14, v5, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v14, v15, v16, vcc
-; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v10
+; GFX9-NEXT: v_subrev_co_u32_e64 v15, s[0:1], s6, v9
; GFX9-NEXT: v_subbrev_co_u32_e64 v0, s[0:1], 0, v0, s[0:1]
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v4
; GFX9-NEXT: v_mov_b32_e32 v13, 0
; GFX9-NEXT: v_cndmask_b32_e64 v4, v11, v5, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v14, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v10, v15, vcc
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v12, v0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v1, v8, s[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v0, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v12, v14, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v9, v15, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v1, v0, s[0:1]
; GFX9-NEXT: global_store_dwordx4 v13, v[2:5], s[12:13]
; GFX9-NEXT: global_store_dwordx4 v13, v[6:9], s[14:15]
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index d053425..7cc5051 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -1483,7 +1483,6 @@ define void @flat_atomic_xchg_i64_noret_av(ptr %ptr) #0 {
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1_vol
; GFX90A-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX90A-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX90A-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_cbranch_execz .LBB20_2
; GFX90A-NEXT: .LBB20_4: ; %atomicrmw.private
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 815b9f2..df9c97f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -161654,177 +161654,175 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_clause 0x1f
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:244
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:240
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:236
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:232
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:228
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:224
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:220
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:216
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:212
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:208
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:200
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:192
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:184
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:176
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:168
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:160
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:152
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:148
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:144
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:136
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:128
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:120
-; GFX11-TRUE16-NEXT: s_clause 0x1a
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:112
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:104
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:20
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v153, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_store_b32 off, v154, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v40, s32 offset:236
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v41, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v42, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v43, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v44, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v45, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v46, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v47, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v56, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v57, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v58, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v59, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v60, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v61, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v62, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v63, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v72, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v73, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v74, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v75, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v76, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v77, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v78, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v79, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v88, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v89, s32 offset:136
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v90, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v91, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v92, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v93, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v94, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v95, s32 offset:112
+; GFX11-TRUE16-NEXT: s_clause 0x18
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v104, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v105, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v106, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v107, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v108, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v109, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v110, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v111, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v120, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v121, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v122, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v123, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v124, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v125, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v126, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v127, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v136, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v137, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v138, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v139, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v140, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v141, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v142, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v143, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_store_b32 off, v152, s32 offset:12
; GFX11-TRUE16-NEXT: s_clause 0x2
; GFX11-TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:8
-; GFX11-TRUE16-NEXT: scratch_load_b32 v85, off, s32 offset:4
-; GFX11-TRUE16-NEXT: scratch_load_b32 v84, off, s32
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_lo16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v99, off, s32 offset:4
+; GFX11-TRUE16-NEXT: scratch_load_b32 v98, off, s32
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr140_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr139_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr179_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr123_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr121_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr107_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr111_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr95_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr93_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr138_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr79_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr91_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr127_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr89_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr75_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr153_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr137_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr142_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr125_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr61_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr154_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr143_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr152_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr136_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr141_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr47_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr39_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr126_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr124_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr122_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr120_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr109_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr106_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr110_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr33_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr108_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr105_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr104_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr94_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr92_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr90_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr77_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr74_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr72_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr88_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr78_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr62_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr59_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr164_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr76_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr57_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr163_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr73_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr63_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr46_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr44_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr166_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr60_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr43_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr165_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr58_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr56_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr45_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr42_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr41_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr178_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr40_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr177_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr183_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr176_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr182_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr167_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr181_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr180_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr52_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_hi16
@@ -161838,135 +161836,135 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[15:16]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[11:12]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[7:8]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[5:6]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[23:24]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[19:20]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[27:28]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[13:14]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[11:12]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[9:10]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[7:8]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[3:4]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v7
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v5
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v3
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v2
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v85
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v99
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v84
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v17
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[13:14]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[9:10]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[3:4]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[1:2]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[84:85]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v98
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v30
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v17
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[5:6]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[1:2]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[98:99]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[29:30]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[27:28]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[23:24]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[21:22]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v180.h, v1.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v1.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v64.h, v1.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v2.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v2.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v65.h, v2.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v43.h, v3.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v40.h, v3.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v3.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v183.h, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v179.h, v4.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v62.h, v5.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v56.h, v5.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.h, v5.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v47.h, v6.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v42.h, v6.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.h, v6.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v91.h, v7.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v79.h, v7.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v7.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v74.h, v8.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v60.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v111.h, v9.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.h, v9.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v89.h, v10.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.h, v10.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v138.h, v11.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v108.h, v12.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.h, v12.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v153.h, v13.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.h, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v137.h, v14.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.h, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v154.h, v15.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.h, v15.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v152.h, v16.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.h, v16.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v17.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v106.h, v9.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v9.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v76.h, v10.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v10.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v127.h, v11.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v11.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v104.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v12.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v142.h, v13.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.h, v13.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v125.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.h, v14.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v143.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.h, v15.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v141.h, v16.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.h, v16.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v135.h, v17.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.h, v17.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v39.h, v18.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v18.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v19.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v148.h, v19.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v19.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v20.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v147.h, v20.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v34.h, v20.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v21.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v150.h, v21.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v35.h, v21.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v22.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v149.h, v22.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v36.h, v22.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v23.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v160.h, v23.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v23.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v24.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v151.h, v24.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v24.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v25.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v162.h, v25.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v48.h, v25.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v26.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v161.h, v26.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v49.h, v26.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v27.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v164.h, v27.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v50.h, v27.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v28.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v163.h, v28.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v51.h, v28.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v29.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v166.h, v29.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v52.h, v29.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v30.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v165.h, v30.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v53.h, v30.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v178.h, v84.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v84.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v177.h, v85.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v85.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v176.h, v98.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v54.h, v98.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v167.h, v99.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v55.h, v99.h
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr1
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr3
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr5
@@ -161982,148 +161980,153 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99
; GFX11-TRUE16-NEXT: .LBB90_2: ; %Flow
; GFX11-TRUE16-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB90_4
; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v17
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_lshlrev_b32 v31, 16, v18
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v34, 0x40c00000, v32 :: v_dual_add_f32 v31, 0x40c00000, v31
; GFX11-TRUE16-NEXT: v_bfe_u32 v38, v34, 16, 1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v32, v31, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v37, 0x400000, v31
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v31, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v32, v32, v31, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v31, v38, v34, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v39, v32, v37, vcc_lo
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_dual_add_f32 v18, 0x40c00000, v18 :: v_dual_lshlrev_b32 v17, 16, v17
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_bfe_u32 v35, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v35, v35, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v17, 0x40c00000, v17 :: v_dual_cndmask_b32 v32, v35, v36
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v35, v36, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v48, v17, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v49, 0x400000, v17
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v39.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v37, v48, v17, 0x7fff
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v20
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 24, v32
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v32
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 24, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v32
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v37, v49, vcc_lo
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v33
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v34
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v135, v37, v49, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff0000, v19
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v31, v31, v33, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v36, 0x40c00000, v34 :: v_dual_add_f32 v19, 0x40c00000, v19
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_add3_u32 v33, v33, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v34, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v135.h
; GFX11-TRUE16-NEXT: v_bfe_u32 v20, v36, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v31.l, v148.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v149, v33, v35, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v147, v33, v35, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
-; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v33, 0x400000, v19
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add3_u32 v20, v20, v36, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v36
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v124, 8, v31
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v34, v17, v34, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v19, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v149.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 8, v31
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v34.l, v147.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v19, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v33, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v34
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v108, 8, v34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v148, v17, v33, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v20, 0xffff0000, v21
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v33, v20, v35 :: v_dual_and_b32 v18, 0xffff0000, v22
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v21
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 24, v34
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v120, 8, v34
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v22, 0x40c00000, v22
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_lshlrev_b32 v22, 16, v22
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v22
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v151, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
-; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v36, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v149, v19, v35 :: v_dual_lshlrev_b32 v22, 16, v24
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v21
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v36, v17, v36, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v151.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v36.l, v149.h
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v24 :: v_dual_lshlrev_b32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v23
+; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v35, 0x400000, v20
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v150, v17, v24, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
-; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v23
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v21, 0x40c00000, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v36
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v35, v19, v35, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
-; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_add_f32 v20, 0x40c00000, v20
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v148.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v36
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 24, v36
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v104, 8, v36
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v161, v19, v23, vcc_lo
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v33
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v151, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
; GFX11-TRUE16-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v18, 0x7fff
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v17, v24, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
-; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v20, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v20
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v151.h
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v25
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v33.l, v150.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v38.l, v161.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v160, v17, v23 :: v_dual_lshlrev_b32 v21, 16, v25
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162136,10 +162139,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v18
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v122, 8, v33
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v161, v19, v23 :: v_dual_lshlrev_b32 v22, 16, v28
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162152,10 +162153,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v27
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v160.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v163.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v49.l, v161.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v35.l, v150.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v162, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162168,10 +162169,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 24, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v49
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v106, 8, v35
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v62, 8, v49
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v94, 8, v35
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v163, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
@@ -162184,10 +162185,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v165.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 24, v38
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v38
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v51.l, v163.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v74, 24, v38
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v38
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v164, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
@@ -162200,14 +162201,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v85
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v56, 24, v51
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v51
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v99
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v51
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v51
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v165, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v85
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v99
; GFX11-TRUE16-NEXT: v_dual_add_f32 v22, 0x40c00000, v22 :: v_dual_cndmask_b32 v53, v17, v24
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v21, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
@@ -162216,14 +162217,14 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v84
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v167.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v98
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v160.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v53.l, v165.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v166, v17, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, 0x400000, v22
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v84
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v98
; GFX11-TRUE16-NEXT: v_dual_add_f32 v21, 0x40c00000, v21 :: v_dual_cndmask_b32 v52, v19, v24
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
@@ -162232,10 +162233,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v22, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v37.l, v162.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 24, v53
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v42, 8, v53
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v23, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v183, 24, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v41, 8, v53
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v88, 8, v37
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v167, v19, v23, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v19, v20, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v2
@@ -162248,11 +162249,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v21, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v177.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v92, 8, v37
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v55.l, v167.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, 0x400000, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v22, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v176, v17, v22, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_bfe_u32 v17, v18, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v1
@@ -162265,11 +162265,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_dual_add_f32 v1, 0x40c00000, v1 :: v_dual_add_f32 v20, 0x40c00000, v20
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_add3_u32 v19, v19, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 24, v55
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v180, 24, v55
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v181, 8, v55
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v20, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v19, v21, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v177, v19, v21, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v4
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
@@ -162282,10 +162282,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX11-TRUE16-NEXT: v_add3_u32 v17, v17, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v179.h
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v162.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v65.l, v177.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v18, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v180, v17, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v178, v17, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
; GFX11-TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@@ -162300,9 +162301,9 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v48.l, v164.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v141, 24, v65
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v183, v2, v19, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 24, v65
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v65
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v179, v2, v19, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v17, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v3
@@ -162312,13 +162313,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v17, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v183.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v67.l, v179.h
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v166.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[96:97], 24, v[48:49]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v43, v1, v18, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v50.l, v164.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v136, 24, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v40, v1, v18, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v6
@@ -162329,13 +162330,13 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v43.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[84:85], 24, v[50:51]
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[50:51]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v127, 24, v67
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v139, 8, v67
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v47, v2, v17, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[85:86], 24, v[48:49]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[86:87], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v137, 8, v67
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v42, v2, v17, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v5, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v8
@@ -162349,23 +162350,23 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v47.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v62, v2, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v50
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v56, v2, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v4, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v5, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v6
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v48
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v68, v1, v17, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v2, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v7
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v56.h
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v74, v3, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v60, v3, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v7
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v10
@@ -162379,8 +162380,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v74.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v176.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.l, v60.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v52.l, v166.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v1, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v1, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
@@ -162388,21 +162389,20 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v1, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v91, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v79, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v12
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v11
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v91.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v89, v1, v4, vcc_lo
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.l, v79.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v76, v1, v4 :: v_dual_lshlrev_b32 v1, 16, v9
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v99, v2, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v2, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
@@ -162410,10 +162410,10 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v178.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v99.l, v89.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[132:133], 24, v[82:83]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v98, v2, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.l, v76.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v54.l, v176.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[130:131], 24, v[82:83]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v2, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -162421,29 +162421,29 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53]
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v111, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v106, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v7, v4, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v14
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v98.l, v111.h
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v108, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.l, v106.h
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v104, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[128:129], 24, v[98:99]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[96:97]
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v115, v3, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v3, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v115.l, v108.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v90, 24, v99
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.l, v104.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v112, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_dual_add_f32 v2, 0x40c00000, v4 :: v_dual_add_f32 v3, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
@@ -162452,8 +162452,8 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v180.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v138, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v64.l, v178.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v127, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -162461,19 +162461,19 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v131, v4, v5, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v129, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v137, v6, v7, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v125, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v114.l, v138.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v130, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.l, v40.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v128, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
@@ -162481,11 +162481,11 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v68.l, v62.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v131.l, v137.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v129.l, v125.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.l, v127.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v153, v4, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v142, v4, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
@@ -162494,83 +162494,82 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v130.l, v153.h
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v152, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v128.l, v142.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v141, v2, v9, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[116:117], 24, v[114:115]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[133:134], 24, v[68:69]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v154, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v69.l, v42.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[114:115], 24, v[112:113]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v143, v7, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[117:118], 24, v[33:34]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[112:113], 24, v[130:131]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[134:135], 24, v[66:67]
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v145, v4, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[144:145], 24, v[66:67]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[131:132], 24, v[68:69]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[145:146], 24, v[64:65]
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v134, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v145.l, v152.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[146:147], 24, v[64:65]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[118:119], 24, v[31:32]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v59, 24, v131
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v144, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e64 v144.l, v154.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v44, 24, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v46, 8, v145
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 8, v131
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v72, 8, v130
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[144:145]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[37:38]
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v134.l, v141.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[115:116], 24, v[33:34]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v61, 24, v129
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v63, 8, v129
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v133, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e64 v133.l, v143.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 24, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v47, 8, v134
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v73, 8, v128
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[100:101], 24, v[133:134]
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[101:102], 24, v[128:129]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[102:103], 24, v[35:36]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v57, 8, v144
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v75, 24, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v77, 8, v115
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v79, 8, v114
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v99
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v98
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v105, 24, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 8, v83
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v110, 8, v82
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v125, 8, v68
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v140, 8, v66
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v142, 8, v65
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v143, 8, v64
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v40, 8, v54
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v45, 8, v52
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v60, 8, v50
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v76, 8, v48
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v58, 8, v133
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v78, 8, v113
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v89, 8, v112
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v91, 24, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v93, 8, v97
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v95, 8, v96
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v107, 24, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v109, 8, v83
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v111, 8, v82
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v121, 24, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v123, 8, v69
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v126, 8, v68
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v138, 8, v66
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v152, 8, v64
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v182, 8, v54
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v43, 8, v52
; GFX11-TRUE16-NEXT: .LBB90_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v180.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v143.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0xff, v178.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v1.h, 8, v152.l
; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v64.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v146.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v145.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v65.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v1.l, v1.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v141.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v139.l
; GFX11-TRUE16-NEXT: v_or_b16 v1.h, v2.l, v2.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v179.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v142.l
+; GFX11-TRUE16-NEXT: v_and_b16 v2.l, 0xff, v177.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v2.h, 8, v140.l
; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v66.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v144.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v5, v1
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v67.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v2.l, v2.h
; GFX11-TRUE16-NEXT: v_or_b16 v2.h, v3.l, v3.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v43.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v140.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v127.l
+; GFX11-TRUE16-NEXT: v_and_b16 v3.l, 0xff, v40.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v3.h, 8, v138.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v136.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v5, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v131.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v3.l, v3.h
; GFX11-TRUE16-NEXT: v_or_b16 v3.h, v4.l, v4.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v183.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v139.l
+; GFX11-TRUE16-NEXT: v_and_b16 v4.l, 0xff, v179.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v4.h, 8, v137.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v69.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v121.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v5, v3
@@ -162578,89 +162577,89 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v4.l, v4.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v6.l, v6.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v62.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v125.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v132.l
+; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v56.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v6.h, 8, v126.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v130.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v83.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v105.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v107.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v6.l, v6.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v47.h
+; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v42.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v123.l
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v98.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v96.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v117.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v5, v6
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v99.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v97.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v91.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v110.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v90.l
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v114.h
+; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v79.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v8.h, 8, v111.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v91.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v112.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v5, v7
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v74.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v107.l
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v115.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v60.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v109.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v113.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v75.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v5, v8
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v130.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v128.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v111.h
+; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v106.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v10.h, 8, v95.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v131.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v129.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v5, v9
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v59.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v61.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v89.h
+; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v76.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v93.l
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v144.h
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v133.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v5, v10
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v145.h
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v134.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v138.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v79.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v44.l
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v127.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v89.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v45.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v31.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v5, v11
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v108.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v77.l
+; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v104.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v13.h, 8, v78.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v32.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v124.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v120.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, v5, v12
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v153.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v72.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v142.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v73.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v34.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v5, v13
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v109.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v105.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v137.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v61.l
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v125.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v63.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v35.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v102.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v5, v14
@@ -162668,71 +162667,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v16.l, v16.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v154.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v57.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v94.l
+; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v143.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v16.h, 8, v58.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v90.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v37.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v5, v15
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v101.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v16.l, v16.h
; GFX11-TRUE16-NEXT: v_or_b16 v16.h, v17.l, v17.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v16.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v152.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v46.l
+; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v141.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v47.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v38.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v78.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v74.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v16, v5, v16
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v48.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v17.l, v17.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v18.l, v18.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v148.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v136.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_and_b16 v18.l, 0xff, v135.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v18.h, 8, v124.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v49.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v5, v17
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v63.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v59.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v18.l, v18.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v19.l, v19.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v5.h
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v126.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v19.h, 8, v122.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v50.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v84.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v18, v5, v18
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v51.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v19.l, v19.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v20.l, v20.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v150.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v122.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v56.l
+; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v148.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v20.h, 8, v110.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v44.l
; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v52.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v5, v19
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v80.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v20.l, v20.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v21.l, v21.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v149.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v120.l
+; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v147.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v21.h, 8, v108.l
; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v53.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v41.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v183.l
; GFX11-TRUE16-NEXT: v_or_b32_e32 v20, v5, v20
; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v54.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v21.l, v21.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v22.l, v22.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v160.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v106.l
+; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v150.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v22.h, 8, v94.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v70.l
; GFX11-TRUE16-NEXT: v_and_b16 v34.l, 0xff, v55.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v5, v21
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v181.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v180.l
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v22.l, v22.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v23.l, v23.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v151.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v104.l
+; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v149.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v23.h, 8, v92.l
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[1:4], off
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[6:9], off offset:16
@@ -162740,71 +162739,71 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v23.l, v23.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v24.l, v24.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v162.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v92.l
+; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v160.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v24.h, 8, v88.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v5, v23
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v24.l, v24.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v25.l, v25.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v161.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v88.l
+; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v151.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v25.h, 8, v77.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v24, v5, v24
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v25.l, v25.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v26.l, v26.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v164.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v76.l
+; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v162.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v26.h, 8, v72.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v5, v25
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v26.l, v26.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v27.l, v27.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v163.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v73.l
+; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v161.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v27.h, 8, v62.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v26, v5, v26
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v27.l, v27.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v28.l, v28.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v166.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v60.l
+; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v164.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v28.h, 8, v57.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v5, v27
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v28.l, v28.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v29.l, v29.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v165.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v58.l
+; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v163.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v29.h, 8, v46.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v28, v5, v28
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v29.l, v29.h
; GFX11-TRUE16-NEXT: v_or_b16 v29.h, v30.l, v30.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v29.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v176.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v45.l
+; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v166.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v30.h, 8, v43.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v29, v5, v29
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v30.l, v30.h
; GFX11-TRUE16-NEXT: v_or_b16 v30.h, v31.l, v31.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v30.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v167.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v42.l
+; GFX11-TRUE16-NEXT: v_and_b16 v31.l, 0xff, v165.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v31.h, 8, v41.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v30, v5, v30
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v31.l, v31.h
; GFX11-TRUE16-NEXT: v_or_b16 v31.h, v32.l, v32.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v31.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v178.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v40.l
+; GFX11-TRUE16-NEXT: v_and_b16 v32.l, 0xff, v176.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v32.h, 8, v182.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v31, v5, v31
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v32.l, v32.h
; GFX11-TRUE16-NEXT: v_or_b16 v32.h, v33.l, v33.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v5.h
-; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v177.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v182.l
+; GFX11-TRUE16-NEXT: v_and_b16 v33.l, 0xff, v167.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v181.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v32, v5, v32
; GFX11-TRUE16-NEXT: v_or_b16 v5.l, v33.l, v33.h
@@ -162820,66 +162819,64 @@ define <128 x i8> @bitcast_v64bf16_to_v128i8(<64 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[26:29], off offset:96
; GFX11-TRUE16-NEXT: scratch_store_b128 v0, v[30:33], off offset:112
; GFX11-TRUE16-NEXT: s_clause 0x1f
-; GFX11-TRUE16-NEXT: scratch_load_b32 v154, off, s32 offset:12
-; GFX11-TRUE16-NEXT: scratch_load_b32 v153, off, s32 offset:16
-; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:20
-; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:24
-; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:28
-; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:32
-; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:36
-; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:40
-; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:44
-; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:48
-; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:52
-; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:56
-; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:60
-; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:64
-; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:68
-; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:72
-; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:76
-; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:80
-; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:84
-; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:88
-; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:92
-; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:96
-; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:100
-; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:104
-; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:108
-; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:112
-; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:116
-; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:120
-; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:124
-; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:128
-; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:132
-; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:136
-; GFX11-TRUE16-NEXT: s_clause 0x1a
-; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:140
-; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:144
-; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:148
-; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:152
-; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:156
-; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:160
-; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:164
-; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:168
-; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:172
-; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:176
-; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:180
-; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:184
-; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:188
-; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:192
-; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:196
-; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:200
-; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:204
-; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:208
-; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:212
-; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:216
-; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:220
-; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:224
-; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:228
-; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:232
-; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:236
-; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:240
-; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:244
+; GFX11-TRUE16-NEXT: scratch_load_b32 v152, off, s32 offset:12
+; GFX11-TRUE16-NEXT: scratch_load_b32 v143, off, s32 offset:16
+; GFX11-TRUE16-NEXT: scratch_load_b32 v142, off, s32 offset:20
+; GFX11-TRUE16-NEXT: scratch_load_b32 v141, off, s32 offset:24
+; GFX11-TRUE16-NEXT: scratch_load_b32 v140, off, s32 offset:28
+; GFX11-TRUE16-NEXT: scratch_load_b32 v139, off, s32 offset:32
+; GFX11-TRUE16-NEXT: scratch_load_b32 v138, off, s32 offset:36
+; GFX11-TRUE16-NEXT: scratch_load_b32 v137, off, s32 offset:40
+; GFX11-TRUE16-NEXT: scratch_load_b32 v136, off, s32 offset:44
+; GFX11-TRUE16-NEXT: scratch_load_b32 v127, off, s32 offset:48
+; GFX11-TRUE16-NEXT: scratch_load_b32 v126, off, s32 offset:52
+; GFX11-TRUE16-NEXT: scratch_load_b32 v125, off, s32 offset:56
+; GFX11-TRUE16-NEXT: scratch_load_b32 v124, off, s32 offset:60
+; GFX11-TRUE16-NEXT: scratch_load_b32 v123, off, s32 offset:64
+; GFX11-TRUE16-NEXT: scratch_load_b32 v122, off, s32 offset:68
+; GFX11-TRUE16-NEXT: scratch_load_b32 v121, off, s32 offset:72
+; GFX11-TRUE16-NEXT: scratch_load_b32 v120, off, s32 offset:76
+; GFX11-TRUE16-NEXT: scratch_load_b32 v111, off, s32 offset:80
+; GFX11-TRUE16-NEXT: scratch_load_b32 v110, off, s32 offset:84
+; GFX11-TRUE16-NEXT: scratch_load_b32 v109, off, s32 offset:88
+; GFX11-TRUE16-NEXT: scratch_load_b32 v108, off, s32 offset:92
+; GFX11-TRUE16-NEXT: scratch_load_b32 v107, off, s32 offset:96
+; GFX11-TRUE16-NEXT: scratch_load_b32 v106, off, s32 offset:100
+; GFX11-TRUE16-NEXT: scratch_load_b32 v105, off, s32 offset:104
+; GFX11-TRUE16-NEXT: scratch_load_b32 v104, off, s32 offset:108
+; GFX11-TRUE16-NEXT: scratch_load_b32 v95, off, s32 offset:112
+; GFX11-TRUE16-NEXT: scratch_load_b32 v94, off, s32 offset:116
+; GFX11-TRUE16-NEXT: scratch_load_b32 v93, off, s32 offset:120
+; GFX11-TRUE16-NEXT: scratch_load_b32 v92, off, s32 offset:124
+; GFX11-TRUE16-NEXT: scratch_load_b32 v91, off, s32 offset:128
+; GFX11-TRUE16-NEXT: scratch_load_b32 v90, off, s32 offset:132
+; GFX11-TRUE16-NEXT: scratch_load_b32 v89, off, s32 offset:136
+; GFX11-TRUE16-NEXT: s_clause 0x18
+; GFX11-TRUE16-NEXT: scratch_load_b32 v88, off, s32 offset:140
+; GFX11-TRUE16-NEXT: scratch_load_b32 v79, off, s32 offset:144
+; GFX11-TRUE16-NEXT: scratch_load_b32 v78, off, s32 offset:148
+; GFX11-TRUE16-NEXT: scratch_load_b32 v77, off, s32 offset:152
+; GFX11-TRUE16-NEXT: scratch_load_b32 v76, off, s32 offset:156
+; GFX11-TRUE16-NEXT: scratch_load_b32 v75, off, s32 offset:160
+; GFX11-TRUE16-NEXT: scratch_load_b32 v74, off, s32 offset:164
+; GFX11-TRUE16-NEXT: scratch_load_b32 v73, off, s32 offset:168
+; GFX11-TRUE16-NEXT: scratch_load_b32 v72, off, s32 offset:172
+; GFX11-TRUE16-NEXT: scratch_load_b32 v63, off, s32 offset:176
+; GFX11-TRUE16-NEXT: scratch_load_b32 v62, off, s32 offset:180
+; GFX11-TRUE16-NEXT: scratch_load_b32 v61, off, s32 offset:184
+; GFX11-TRUE16-NEXT: scratch_load_b32 v60, off, s32 offset:188
+; GFX11-TRUE16-NEXT: scratch_load_b32 v59, off, s32 offset:192
+; GFX11-TRUE16-NEXT: scratch_load_b32 v58, off, s32 offset:196
+; GFX11-TRUE16-NEXT: scratch_load_b32 v57, off, s32 offset:200
+; GFX11-TRUE16-NEXT: scratch_load_b32 v56, off, s32 offset:204
+; GFX11-TRUE16-NEXT: scratch_load_b32 v47, off, s32 offset:208
+; GFX11-TRUE16-NEXT: scratch_load_b32 v46, off, s32 offset:212
+; GFX11-TRUE16-NEXT: scratch_load_b32 v45, off, s32 offset:216
+; GFX11-TRUE16-NEXT: scratch_load_b32 v44, off, s32 offset:220
+; GFX11-TRUE16-NEXT: scratch_load_b32 v43, off, s32 offset:224
+; GFX11-TRUE16-NEXT: scratch_load_b32 v42, off, s32 offset:228
+; GFX11-TRUE16-NEXT: scratch_load_b32 v41, off, s32 offset:232
+; GFX11-TRUE16-NEXT: scratch_load_b32 v40, off, s32 offset:236
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -186724,55 +186721,55 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
@@ -186798,24 +186795,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -186827,24 +186824,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
@@ -186906,24 +186903,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -186935,24 +186932,24 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17
; GFX11-TRUE16-NEXT: .LBB94_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -186990,7 +186987,7 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
@@ -186998,15 +186995,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
@@ -187014,15 +187011,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
@@ -187030,15 +187027,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
@@ -187046,15 +187043,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
@@ -187062,15 +187059,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
@@ -187078,8 +187075,8 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15
@@ -187094,15 +187091,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
@@ -187110,15 +187107,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
@@ -187126,15 +187123,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
@@ -187142,15 +187139,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
@@ -187158,15 +187155,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
@@ -187174,15 +187171,15 @@ define <128 x i8> @bitcast_v64f16_to_v128i8(<64 x half> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
@@ -209426,55 +209423,55 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr69_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr162_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr161_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr151_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr149_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr147_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr67_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr145_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr135_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr133_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr64_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr131_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr129_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr119_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr53_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr117_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr50_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr103_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr38_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr65_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr160_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr150_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr148_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr146_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr54_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr144_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr134_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr132_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr51_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr130_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr128_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr118_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr36_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
@@ -209500,24 +209497,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[64:65], 24, v[9:10]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[69:70], 24, v[3:4]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[36:37], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -209529,24 +209526,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[70:71], 24, v[1:2]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
@@ -209608,24 +209605,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[65:66], 24, v[17:18]
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 8, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 24, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v11
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 24, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 8, v10
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v9
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 24, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v7
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 24, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 8, v6
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v5
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 24, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v97, 8, v16
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v15
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v103, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v13
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v12
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v119, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v129, 24, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v131, 8, v10
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v133, 8, v9
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v135, 24, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v145, 8, v8
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v147, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v149, 24, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v151, 8, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v5
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v161, 24, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v162, 8, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v163, 8, v3
@@ -209637,24 +209634,24 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v82, 8, v31
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 24, v30
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v30
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v29
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 24, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v28
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v27
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 24, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v26
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v25
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 24, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 8, v23
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 24, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v22
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 8, v21
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 24, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v20
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 8, v19
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 24, v18
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v160, 8, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 8, v29
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 24, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 24, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v26
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v112, 8, v25
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 24, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v24
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v118, 8, v23
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v128, 24, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v130, 8, v22
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v132, 8, v21
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v134, 24, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v146, 8, v19
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v148, 24, v18
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v150, 8, v18
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v37, 8, v17
; GFX11-TRUE16-NEXT: .LBB98_4: ; %end
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -209692,7 +209689,7 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v4.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v4.h, v4.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v68.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.l, 0xff, v6.l
; GFX11-TRUE16-NEXT: v_and_b16 v6.h, 0xff, v6.h
@@ -209700,15 +209697,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v5.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v5.h, v5.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v149.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v147.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v151.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v149.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v7.l
; GFX11-TRUE16-NEXT: v_and_b16 v7.h, 0xff, v7.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v39, v5
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v6.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v6.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v147.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v67.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.l, 0xff, v8.l
; GFX11-TRUE16-NEXT: v_and_b16 v8.h, 0xff, v8.h
@@ -209716,15 +209713,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v7.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v7.h, v7.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v7.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v135.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v133.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v145.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v135.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v9.l
; GFX11-TRUE16-NEXT: v_and_b16 v9.h, 0xff, v9.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, v39, v7
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v8.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v8.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v133.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v64.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.l, 0xff, v10.l
; GFX11-TRUE16-NEXT: v_and_b16 v10.h, 0xff, v10.h
@@ -209732,15 +209729,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v9.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v9.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v129.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v119.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v131.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v129.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v11.l
; GFX11-TRUE16-NEXT: v_and_b16 v11.h, 0xff, v11.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, v39, v9
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v10.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v10.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v119.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v53.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v12.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.h, 0xff, v12.h
@@ -209748,15 +209745,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v11.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v11.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v115.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v117.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v115.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.l, 0xff, v13.l
; GFX11-TRUE16-NEXT: v_and_b16 v13.h, 0xff, v13.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v39, v11
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v12.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v12.h, v12.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v12.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v113.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v50.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v14.l
; GFX11-TRUE16-NEXT: v_and_b16 v14.h, 0xff, v14.h
@@ -209764,15 +209761,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v13.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v13.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v101.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v99.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v103.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v101.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v15.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.h, 0xff, v15.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v39, v13
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v14.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v14.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v99.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v38.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.l, 0xff, v16.l
; GFX11-TRUE16-NEXT: v_and_b16 v16.h, 0xff, v16.h
@@ -209780,8 +209777,8 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v15.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v15.h, v15.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v15.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v87.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v85.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v97.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v87.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.l, 0xff, v17.l
; GFX11-TRUE16-NEXT: v_and_b16 v17.h, 0xff, v17.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v39, v15
@@ -209796,15 +209793,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v17.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v17.h, v17.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v17.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v160.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v150.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v148.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.l, 0xff, v19.l
; GFX11-TRUE16-NEXT: v_and_b16 v19.h, 0xff, v19.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v17, v39, v17
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v18.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v18.h, v18.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v148.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v54.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.l, 0xff, v20.l
; GFX11-TRUE16-NEXT: v_and_b16 v20.h, 0xff, v20.h
@@ -209812,15 +209809,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v19.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v19.h, v19.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v19.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v146.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v144.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v134.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.l, 0xff, v21.l
; GFX11-TRUE16-NEXT: v_and_b16 v21.h, 0xff, v21.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v19, v39, v19
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v20.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v20.h, v20.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v20.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v134.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v51.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.l, 0xff, v22.l
; GFX11-TRUE16-NEXT: v_and_b16 v22.h, 0xff, v22.h
@@ -209828,15 +209825,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v21.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v21.h, v21.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v21.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v132.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v130.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v128.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.l, 0xff, v23.l
; GFX11-TRUE16-NEXT: v_and_b16 v23.h, 0xff, v23.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v21, v39, v21
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v22.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v22.h, v22.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v22.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v128.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v48.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.l, 0xff, v24.l
; GFX11-TRUE16-NEXT: v_and_b16 v24.h, 0xff, v24.h
@@ -209844,15 +209841,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v23.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v23.h, v23.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v118.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v116.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.l, 0xff, v25.l
; GFX11-TRUE16-NEXT: v_and_b16 v25.h, 0xff, v25.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v23, v39, v23
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v24.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v24.h, v24.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v114.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v36.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.l, 0xff, v26.l
; GFX11-TRUE16-NEXT: v_and_b16 v26.h, 0xff, v26.h
@@ -209860,15 +209857,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v25.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v25.h, v25.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v112.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v102.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.l, 0xff, v27.l
; GFX11-TRUE16-NEXT: v_and_b16 v27.h, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v25, v39, v25
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v26.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v26.h, v26.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v100.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v35.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.l, 0xff, v28.l
; GFX11-TRUE16-NEXT: v_and_b16 v28.h, 0xff, v28.h
@@ -209876,15 +209873,15 @@ define <128 x i8> @bitcast_v64i16_to_v128i8(<64 x i16> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v27.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v27.h, v27.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v98.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v96.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.h, 8, v86.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.l, 0xff, v29.l
; GFX11-TRUE16-NEXT: v_and_b16 v29.h, 0xff, v29.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v27, v39, v27
; GFX11-TRUE16-NEXT: v_or_b16 v39.l, v28.l, v33.h
; GFX11-TRUE16-NEXT: v_or_b16 v28.h, v28.h, v34.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v39.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v86.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v33.h, 8, v85.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v34.l, 8, v34.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.l, 0xff, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v30.h, 0xff, v30.h
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index e33493c..d3fbba3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -85072,13 +85072,13 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr116_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr115_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr68_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr23_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr48_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr66_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr102_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr101_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr100_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr25_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr35_lo16
@@ -85086,20 +85086,20 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr99_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr98_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr97_hi16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_lo16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr27_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr34_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr82_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr86_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr85_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr112_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr84_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr32_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr30_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr96_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr87_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr83_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr81_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr80_lo16
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr114_hi16
+; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr113_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr71_lo16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr37_hi16
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr29_lo16
@@ -85119,18 +85119,18 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v16
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v71, 8, v15
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v14
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v14
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v14
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v13
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v12
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v12
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v11
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v11
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v98, 24, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v99, 8, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v100, 8, v9
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v8
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v7
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v7
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v115, 24, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v116, 8, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v117, 8, v5
@@ -85159,19 +85159,19 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.h, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v66.h, v8.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v24.h, v8.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v83.h, v9.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v9.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.h, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v67.h, v10.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.h, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v97.h, v11.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.h, v11.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v82.h, v12.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v80.h, v12.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.h, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v112.h, v13.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.h, v13.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v96.h, v14.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v87.h, v14.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v14.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v114.h, v15.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v113.h, v15.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.h, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v103.h, v16.l
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.h, v16.h
@@ -85345,29 +85345,29 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v23.l, v68.h
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v12
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v26.l, v67.h
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[48:49], 24, v[23:24]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v25, v2, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_add3_u32 v2, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v5, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[49:50], 24, v[21:22]
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[50:51], 24, v[19:20]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18]
; GFX11-TRUE16-NEXT: v_add3_u32 v1, v6, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v83, v2, v3, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v82, v2, v3, vcc_lo
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v14
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v83.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24
-; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v82, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v25.l, v82.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[51:52], 24, v[17:18]
+; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v80, v1, v2 :: v_dual_lshlrev_b32 v1, 16, v11
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v4, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
@@ -85384,82 +85384,81 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v13
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v113, 8, v23
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v80.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v101, 24, v24
; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v27, v2, v3 :: v_dual_add_f32 v2, 0x40c00000, v4
; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v28.l, v82.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 0x40c00000, v6
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v97, v4, v5, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-TRUE16-NEXT: v_add3_u32 v6, v8, v3, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v33, v4, v5, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v13
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v8, v1, 0x7fff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v15
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v96, v6, v7, vcc_lo
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 0x40c00000, v9
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v27.l, v97.h
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v87, v6, v7, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v102, 8, v24
; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[34:35], 24, v[27:28]
-; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26]
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v96.h
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v85, 24, v28
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.l, v87.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v32, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v80, 24, v33
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 8, v33
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; GFX11-TRUE16-NEXT: v_lshrrev_b64 v[35:36], 24, v[25:26]
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v81, 24, v33
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v83, 8, v33
; GFX11-TRUE16-NEXT: v_add3_u32 v4, v6, v2, 0x7fff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v16
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v86, 8, v28
-; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v96, 8, v27
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v114, 8, v23
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_dual_cndmask_b32 v112, v4, v6 :: v_dual_add_f32 v1, 0x40c00000, v5
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v6, 0x40c00000, v8
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, 0x40c00000, v8 :: v_dual_lshlrev_b32 v5, 16, v15
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v32.l, v112.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v1, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v84, 8, v32
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v87, 8, v27
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v4, v7, v1, 0x7fff
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v2, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v114, v7, v11, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v2, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v103, v2, v9, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-TRUE16-NEXT: v_add3_u32 v2, v10, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v113, v7, v11, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v38, v4, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v38.l, v103.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v37, v2, v3, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v114.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v37.l, v113.h
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v69, 24, v38
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v70, 8, v38
@@ -85524,7 +85523,7 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v6.h, v7.l, v7.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v7.l, 0xff, v68.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v113.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v7.h, 8, v114.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v98.l
; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v27.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v31, v6
@@ -85541,12 +85540,12 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v8.l, v8.h
; GFX11-TRUE16-NEXT: v_or_b16 v8.h, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v8.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v83.h
+; GFX11-TRUE16-NEXT: v_and_b16 v9.l, 0xff, v82.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v9.h, 8, v100.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v30.l
; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v33.h
; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, v31, v8
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v80.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v81.l
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v9.l, v9.h
; GFX11-TRUE16-NEXT: v_or_b16 v9.h, v10.l, v10.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v9.l, v31.h
@@ -85560,14 +85559,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v10.h, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v10.l, v31.h
; GFX11-TRUE16-NEXT: v_and_b16 v11.l, 0xff, v97.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v87.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v11.h, 8, v96.l
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v17.h, 8, v69.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v31, v10
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v11.l, v11.h
; GFX11-TRUE16-NEXT: v_or_b16 v11.h, v12.l, v12.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v11.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v82.h
+; GFX11-TRUE16-NEXT: v_and_b16 v12.l, 0xff, v80.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v12.h, 8, v86.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v31, v11
@@ -85581,14 +85580,14 @@ define <64 x i8> @bitcast_v32bf16_to_v64i8(<32 x bfloat> %a, i32 %b) {
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v13.l, v13.h
; GFX11-TRUE16-NEXT: v_or_b16 v13.h, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v13.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v96.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v81.l
+; GFX11-TRUE16-NEXT: v_and_b16 v14.l, 0xff, v87.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b16 v14.h, 8, v83.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v31, v13
; GFX11-TRUE16-NEXT: v_or_b16 v31.l, v14.l, v14.h
; GFX11-TRUE16-NEXT: v_or_b16 v14.h, v15.l, v15.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v14.l, v31.h
-; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v114.h
+; GFX11-TRUE16-NEXT: v_and_b16 v15.l, 0xff, v113.h
; GFX11-TRUE16-NEXT: v_lshlrev_b16 v15.h, 8, v71.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v31, v14
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 67c9bfe..ecc715c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -11261,8 +11261,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s5, s17, 24
; VI-NEXT: s_lshr_b32 s8, s17, 16
-; VI-NEXT: s_lshr_b32 s9, s17, 8
-; VI-NEXT: s_lshr_b32 s10, s16, 16
+; VI-NEXT: s_lshr_b32 s10, s17, 8
+; VI-NEXT: s_lshr_b32 s9, s16, 16
; VI-NEXT: s_lshr_b32 s11, s16, 8
; VI-NEXT: s_cbranch_execnz .LBB85_4
; VI-NEXT: .LBB85_2: ; %cmp.true
@@ -11277,9 +11277,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: s_branch .LBB85_5
; VI-NEXT: .LBB85_3:
; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
; VI-NEXT: ; implicit-def: $sgpr9
+; VI-NEXT: ; implicit-def: $sgpr4
+; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: s_branch .LBB85_2
@@ -11287,8 +11287,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
; VI-NEXT: v_mov_b32_e32 v1, s11
-; VI-NEXT: v_mov_b32_e32 v2, s10
-; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v2, s9
+; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v6, s8
; VI-NEXT: v_mov_b32_e32 v7, s5
; VI-NEXT: v_mov_b32_e32 v3, s4
@@ -11306,8 +11306,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB85_4
; GFX9-NEXT: .LBB85_2: ; %cmp.true
@@ -11322,9 +11322,9 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: s_branch .LBB85_5
; GFX9-NEXT: .LBB85_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB85_2
@@ -11332,8 +11332,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -11352,8 +11352,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
@@ -11370,16 +11370,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB85_5: ; %end
@@ -13517,8 +13517,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB97_4
; GFX9-NEXT: .LBB97_2: ; %cmp.true
@@ -13533,9 +13533,9 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: s_branch .LBB97_5
; GFX9-NEXT: .LBB97_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB97_2
@@ -13543,8 +13543,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -13563,8 +13563,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
@@ -13581,16 +13581,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB97_5: ; %end
@@ -15345,8 +15345,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; GFX9-NEXT: s_lshr_b32 s5, s17, 24
; GFX9-NEXT: s_lshr_b32 s8, s17, 16
-; GFX9-NEXT: s_lshr_b32 s9, s17, 8
-; GFX9-NEXT: s_lshr_b32 s10, s16, 16
+; GFX9-NEXT: s_lshr_b32 s10, s17, 8
+; GFX9-NEXT: s_lshr_b32 s9, s16, 16
; GFX9-NEXT: s_lshr_b32 s11, s16, 8
; GFX9-NEXT: s_cbranch_execnz .LBB105_4
; GFX9-NEXT: .LBB105_2: ; %cmp.true
@@ -15362,9 +15362,9 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: s_branch .LBB105_5
; GFX9-NEXT: .LBB105_3:
; GFX9-NEXT: ; implicit-def: $sgpr11
-; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: ; implicit-def: $sgpr4
; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr4
+; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: s_branch .LBB105_2
@@ -15372,8 +15372,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
; GFX9-NEXT: v_mov_b32_e32 v1, s11
-; GFX9-NEXT: v_mov_b32_e32 v2, s10
-; GFX9-NEXT: v_mov_b32_e32 v5, s9
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_mov_b32_e32 v5, s10
; GFX9-NEXT: v_mov_b32_e32 v6, s8
; GFX9-NEXT: v_mov_b32_e32 v7, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s4
@@ -15392,8 +15392,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s0, 8
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
@@ -15410,16 +15410,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: s_branch .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s7 :: v_dual_mov_b32 v6, s5
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB105_5: ; %end
@@ -16493,8 +16493,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
; VI-NEXT: s_lshr_b32 s8, s17, 24
; VI-NEXT: s_lshr_b32 s5, s17, 16
-; VI-NEXT: s_lshr_b32 s9, s17, 8
-; VI-NEXT: s_lshr_b32 s10, s16, 16
+; VI-NEXT: s_lshr_b32 s10, s17, 8
+; VI-NEXT: s_lshr_b32 s9, s16, 16
; VI-NEXT: s_lshr_b32 s11, s16, 8
; VI-NEXT: s_cbranch_execnz .LBB109_4
; VI-NEXT: .LBB109_2: ; %cmp.true
@@ -16546,16 +16546,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB109_3:
; VI-NEXT: ; implicit-def: $sgpr11
-; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: ; implicit-def: $sgpr4
; VI-NEXT: ; implicit-def: $sgpr9
+; VI-NEXT: ; implicit-def: $sgpr4
+; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: s_branch .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v1, s11
-; VI-NEXT: v_mov_b32_e32 v2, s10
-; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: v_mov_b32_e32 v2, s9
+; VI-NEXT: v_mov_b32_e32 v5, s10
; VI-NEXT: v_mov_b32_e32 v7, s8
; VI-NEXT: v_mov_b32_e32 v3, s4
; VI-NEXT: v_mov_b32_e32 v0, s16
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 97df2a0..258bc295 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -5548,7 +5548,6 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX7LESS: ; %bb.0: ; %entry
; GFX7LESS-NEXT: s_mov_b64 s[6:7], exec
; GFX7LESS-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s4, 0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s6, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v4, s7, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
@@ -5557,33 +5556,32 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_4
; GFX7LESS-NEXT: ; %bb.1:
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX7LESS-NEXT: s_mov_b64 s[10:11], 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, s4
; GFX7LESS-NEXT: s_mov_b32 s7, 0xf000
-; GFX7LESS-NEXT: s_mul_i32 s12, s5, 5
+; GFX7LESS-NEXT: s_mul_i32 s12, s6, 5
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s14
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s15
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, s4
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, s5
; GFX7LESS-NEXT: s_mov_b32 s6, -1
; GFX7LESS-NEXT: s_mov_b32 s4, s2
; GFX7LESS-NEXT: s_mov_b32 s5, s3
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: v_mov_b32_e32 v9, v1
-; GFX7LESS-NEXT: v_mov_b32_e32 v8, v0
-; GFX7LESS-NEXT: v_subrev_i32_e32 v6, vcc, s12, v8
-; GFX7LESS-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v0
+; GFX7LESS-NEXT: v_subrev_i32_e32 v5, vcc, s12, v7
+; GFX7LESS-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, v6
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, v7
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, v8
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, v9
+; GFX7LESS-NEXT: v_mov_b32_e32 v0, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v1, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v7
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v8
; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: buffer_wbinvl1
-; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX7LESS-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
@@ -5611,39 +5609,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX8-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0
-; GFX8-NEXT: s_mov_b32 s4, 0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX8-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX8-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX8-NEXT: s_cbranch_execz .LBB9_4
; GFX8-NEXT: ; %bb.1:
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX8-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX8-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX8-NEXT: s_mov_b64 s[10:11], 0
-; GFX8-NEXT: v_mov_b32_e32 v5, s4
-; GFX8-NEXT: s_mul_i32 s12, s5, 5
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, s14
-; GFX8-NEXT: v_mov_b32_e32 v1, s15
; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mul_i32 s12, s6, 5
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_mov_b32 s4, s2
; GFX8-NEXT: s_mov_b32 s5, s3
; GFX8-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX8-NEXT: v_mov_b32_e32 v9, v1
-; GFX8-NEXT: v_mov_b32_e32 v8, v0
-; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s12, v8
-; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v9, v5, vcc
-; GFX8-NEXT: v_mov_b32_e32 v0, v6
-; GFX8-NEXT: v_mov_b32_e32 v1, v7
-; GFX8-NEXT: v_mov_b32_e32 v2, v8
-; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: v_mov_b32_e32 v8, v1
+; GFX8-NEXT: v_mov_b32_e32 v7, v0
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s12, v7
+; GFX8-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v8, vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
+; GFX8-NEXT: v_mov_b32_e32 v1, v6
+; GFX8-NEXT: v_mov_b32_e32 v2, v7
+; GFX8-NEXT: v_mov_b32_e32 v3, v8
; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1_vol
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX8-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX8-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX8-NEXT: s_cbranch_execnz .LBB9_2
@@ -5670,39 +5666,37 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX9-NEXT: s_mov_b64 s[6:7], exec
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v4, s7, v0
-; GFX9-NEXT: s_mov_b32 s4, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX9-NEXT: s_and_saveexec_b64 s[8:9], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_4
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[14:15], s[2:3], 0x0
-; GFX9-NEXT: s_bcnt1_i32_b64 s5, s[6:7]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GFX9-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX9-NEXT: s_mov_b64 s[10:11], 0
-; GFX9-NEXT: v_mov_b32_e32 v5, s4
-; GFX9-NEXT: s_mul_i32 s12, s5, 5
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s14
-; GFX9-NEXT: v_mov_b32_e32 v1, s15
; GFX9-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-NEXT: s_mul_i32 s12, s6, 5
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s4, s2
; GFX9-NEXT: s_mov_b32 s5, s3
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT: v_mov_b32_e32 v9, v1
-; GFX9-NEXT: v_mov_b32_e32 v8, v0
-; GFX9-NEXT: v_subrev_co_u32_e32 v6, vcc, s12, v8
-; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v9, v5, vcc
-; GFX9-NEXT: v_mov_b32_e32 v0, v6
-; GFX9-NEXT: v_mov_b32_e32 v1, v7
-; GFX9-NEXT: v_mov_b32_e32 v2, v8
-; GFX9-NEXT: v_mov_b32_e32 v3, v9
+; GFX9-NEXT: v_mov_b32_e32 v8, v1
+; GFX9-NEXT: v_mov_b32_e32 v7, v0
+; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s12, v7
+; GFX9-NEXT: v_subbrev_co_u32_e32 v6, vcc, 0, v8, vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-NEXT: v_mov_b32_e32 v2, v7
+; GFX9-NEXT: v_mov_b32_e32 v3, v8
; GFX9-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
-; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index c3b14e8..ca50835 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -57,8 +57,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start
@@ -69,7 +68,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -96,9 +95,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -106,7 +104,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -123,9 +121,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -133,7 +130,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -150,9 +147,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -160,7 +156,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -245,8 +241,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start
@@ -256,7 +251,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -292,16 +287,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_add_f32_e32 v1, v2, v0
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -318,16 +312,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v1, s20
; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v1, v2, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v2
; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -468,7 +461,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
@@ -481,7 +473,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
@@ -507,7 +498,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_4
@@ -556,7 +547,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -569,7 +559,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -594,7 +583,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -614,7 +603,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -627,7 +615,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -652,7 +639,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -672,7 +659,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory__amdgpu_ignore_denormal_mode:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -684,7 +670,6 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2:
@@ -709,7 +694,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__waterfall__amdgp
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
@@ -830,8 +815,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
@@ -842,7 +826,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -860,16 +844,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -886,9 +869,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -896,7 +878,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -913,9 +895,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -923,7 +904,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -940,9 +921,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -950,7 +930,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_fine_g
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,8 +1015,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start
@@ -1046,7 +1025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1064,15 +1043,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_add_f32_e32 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -1089,16 +1066,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_add_f32_e32 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1115,16 +1091,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_add_f32_e32 v1, v2, v0
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1141,16 +1116,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v1, s20
; GFX7-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_add_f32_e32 v1, v2, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v2
; GFX7-NEXT: v_mov_b32_e32 v4, v1
-; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -1223,9 +1197,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1237,7 +1209,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1255,8 +1227,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start
@@ -1267,7 +1238,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1285,16 +1256,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1311,9 +1281,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1321,7 +1290,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1338,9 +1307,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1348,7 +1316,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1365,9 +1333,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1375,7 +1342,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset(ptr addrspace(7)
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1448,9 +1415,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1462,7 +1427,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1480,8 +1445,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
@@ -1492,7 +1456,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1510,16 +1474,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1536,9 +1499,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1546,7 +1508,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1563,9 +1525,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1573,7 +1534,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1590,9 +1551,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1600,7 +1560,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1673,9 +1633,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote_memory__amdgpu_ignore_denormal_mode:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -1687,7 +1645,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1705,8 +1663,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
@@ -1717,7 +1674,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -1735,16 +1692,15 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1761,9 +1717,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1771,7 +1726,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1788,9 +1743,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1798,7 +1752,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1815,9 +1769,8 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -1825,7 +1778,7 @@ define float @buffer_fat_ptr_agent_atomic_fadd_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1883,24 +1836,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1925,25 +1876,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1958,26 +1907,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -1999,26 +1947,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2030,26 +1977,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2061,26 +2007,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2092,27 +2037,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2133,9 +2078,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2146,7 +2089,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[4:5]
@@ -2174,9 +2117,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2187,7 +2128,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -2205,8 +2146,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
+; GFX10-NEXT: v_mov_b32_e32 v6, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start
@@ -2218,7 +2158,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v8, v3
; GFX10-NEXT: v_mov_b32_e32 v7, v2
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -2246,9 +2186,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -2257,7 +2196,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v4
; GFX908-NEXT: v_mov_b32_e32 v8, v3
; GFX908-NEXT: v_mov_b32_e32 v7, v2
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2275,9 +2214,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -2286,7 +2224,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v4
; GFX8-NEXT: v_mov_b32_e32 v8, v3
; GFX8-NEXT: v_mov_b32_e32 v7, v2
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2304,9 +2242,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v6, s20
; GFX7-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -2315,7 +2252,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: v_mov_b32_e32 v9, v4
; GFX7-NEXT: v_mov_b32_e32 v8, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v2
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[4:5]
@@ -2373,10 +2310,9 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -2390,7 +2326,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB10_1
; GFX12-NEXT: ; %bb.2:
@@ -2420,7 +2355,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB10_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2474,22 +2409,21 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB10_1
; GFX11-NEXT: ; %bb.2:
@@ -2518,7 +2452,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB10_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2543,7 +2477,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: v_mov_b32_e32 v7, v2
; GFX10-NEXT: v_mov_b32_e32 v10, v1
; GFX10-NEXT: v_mov_b32_e32 v9, v0
-; GFX10-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
@@ -2556,7 +2489,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2584,7 +2516,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_4
@@ -2640,7 +2572,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -2653,7 +2584,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2:
@@ -2680,7 +2610,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2704,7 +2634,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -2717,7 +2646,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2:
@@ -2744,7 +2672,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2768,7 +2696,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: v_mov_b32_e32 v7, v2
; GFX7-NEXT: v_mov_b32_e32 v10, v1
; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_i32_e32 v15, vcc, 0x800, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB10_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v9
@@ -2780,7 +2707,6 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX7-NEXT: ; implicit-def: $vgpr4
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2:
@@ -2807,7 +2733,7 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__waterfall__amdg
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB10_3 Depth=1
@@ -2903,24 +2829,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2945,25 +2869,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2978,26 +2900,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -3010,23 +2931,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v4, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: v_mov_b32_e32 v5, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_mov_b32_e32 v10, s20
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[8:9], v[8:9] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3038,26 +2958,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3069,26 +2988,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3100,26 +3018,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3131,27 +3048,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_remot
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3173,24 +3090,22 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
+; GFX12-NEXT: v_mov_b32_e32 v10, s16
; GFX12-NEXT: s_mov_b32 s4, 0
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_add_f64_e32 v[6:7], v[8:9], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_add_f64_e32 v[7:8], v[9:10], v[4:5]
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX12-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX12-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3215,25 +3130,23 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v10, s16
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: buffer_load_b64 v[8:9], v0, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, v6 :: v_dual_mov_b32 v1, v7
+; GFX11-NEXT: v_dual_mov_b32 v2, v8 :: v_dual_mov_b32 v3, v9
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v10, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX11-NEXT: v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -3248,26 +3161,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
; GFX10-NEXT: v_mov_b32_e32 v5, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX10-NEXT: v_mov_b32_e32 v10, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_mov_b32_e32 v0, v6
+; GFX10-NEXT: v_mov_b32_e32 v1, v7
+; GFX10-NEXT: v_mov_b32_e32 v2, v8
+; GFX10-NEXT: v_mov_b32_e32 v3, v9
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v9, v1
+; GFX10-NEXT: v_mov_b32_e32 v8, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB12_1
@@ -3289,26 +3201,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_mov_b32_e32 v5, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v10, s20
; GFX908-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v0, v6
+; GFX908-NEXT: v_mov_b32_e32 v1, v7
+; GFX908-NEXT: v_mov_b32_e32 v2, v8
+; GFX908-NEXT: v_mov_b32_e32 v3, v9
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX908-NEXT: v_mov_b32_e32 v9, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v8, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB12_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3320,26 +3231,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_mov_b32_e32 v5, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v10, s20
; GFX8-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, v6
+; GFX8-NEXT: v_mov_b32_e32 v1, v7
+; GFX8-NEXT: v_mov_b32_e32 v2, v8
+; GFX8-NEXT: v_mov_b32_e32 v3, v9
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_mov_b32_e32 v9, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v8, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB12_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3351,26 +3261,25 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX7-NEXT: v_mov_b32_e32 v5, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v10, s20
; GFX7-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB12_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3382,27 +3291,27 @@ define double @buffer_fat_ptr_agent_atomic_fadd_ret_f64__offset__amdgpu_no_fine_
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v5, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v0, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v10, s6
; GFX6-NEXT: .LBB12_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: v_add_f64 v[7:8], v[9:10], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v10, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB12_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7028,9 +6937,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -7042,7 +6949,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -7060,8 +6967,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7072,7 +6978,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7099,9 +7005,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7109,7 +7014,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -7126,9 +7031,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7138,7 +7042,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -7156,7 +7060,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -7164,7 +7067,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -7181,7 +7084,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -7277,9 +7180,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -7290,7 +7191,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -7308,8 +7209,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -7319,7 +7219,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7355,9 +7255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7366,7 +7265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -7385,7 +7284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -7393,7 +7291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -7410,7 +7308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -7543,7 +7441,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -7558,7 +7455,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b32 v8, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-NEXT: ; %bb.2:
@@ -7587,7 +7483,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v9, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[6:7], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7609,7 +7505,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v9, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -7622,7 +7517,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -7648,7 +7542,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
@@ -7697,7 +7591,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v9, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7710,7 +7603,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -7735,7 +7627,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7755,7 +7647,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7768,7 +7659,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -7778,9 +7668,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v6, v8, v5
-; GFX8-NEXT: v_or_b32_e32 v7, v6, v4
+; GFX8-NEXT: v_add_f16_sdwa v6, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v7, v8, v5
+; GFX8-NEXT: v_or_b32_e32 v7, v7, v6
; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: s_mov_b64 s[12:13], exec
; GFX8-NEXT: v_mov_b32_e32 v7, v8
@@ -7795,7 +7685,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v9, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
@@ -7815,7 +7705,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7826,39 +7715,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_add_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_add_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_add_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_add_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7870,23 +7758,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -8003,9 +7891,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -8017,7 +7903,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8035,8 +7921,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
@@ -8047,7 +7932,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8065,16 +7950,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8091,9 +7975,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8101,7 +7984,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8118,9 +8001,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8130,7 +8012,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8148,7 +8030,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8156,7 +8037,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -8173,7 +8054,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -8269,9 +8150,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
@@ -8282,7 +8161,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8300,8 +8179,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
@@ -8311,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8329,15 +8207,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -8354,16 +8230,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_pk_add_f16 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8380,9 +8255,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8391,7 +8265,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8410,7 +8284,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8418,7 +8291,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -8435,7 +8308,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -8530,9 +8403,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s4
+; GFX11-NEXT: v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, s16
; GFX11-NEXT: v_mov_b32_e32 v0, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
@@ -8544,7 +8415,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_add_f16 v4, v5, v2
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8562,8 +8433,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start
@@ -8574,7 +8444,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_add_f16 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8592,16 +8462,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v5, v0
; GFX90A-NEXT: v_pk_add_f16 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8618,9 +8487,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8628,7 +8496,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_add_f16 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8645,9 +8513,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8657,7 +8524,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -8675,7 +8542,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8683,7 +8549,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -8700,7 +8566,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -8796,9 +8662,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start
@@ -8809,7 +8673,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -8827,8 +8691,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start
@@ -8838,7 +8701,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX10-NEXT: v_mov_b32_e32 v5, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_mov_b32_e32 v4, v1
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8856,15 +8719,13 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v3, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v1, s6
; GFX90A-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_add_f16 v2, v3, v0
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v1, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
@@ -8881,16 +8742,15 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_pk_add_f16 v1, v2, v0
; GFX908-NEXT: v_mov_b32_e32 v5, v2
; GFX908-NEXT: v_mov_b32_e32 v4, v1
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8907,9 +8767,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v2, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8918,7 +8777,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v2
@@ -8937,7 +8796,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -8945,7 +8803,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -8962,7 +8820,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -9054,13 +8912,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9082,7 +8939,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -9097,12 +8954,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
@@ -9131,7 +8987,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -9149,10 +9005,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -9183,7 +9038,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -9202,9 +9057,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -9230,7 +9084,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -9248,13 +9102,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9275,7 +9128,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -9292,13 +9145,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9320,7 +9172,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -9337,11 +9189,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9366,7 +9217,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -9382,7 +9233,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -9391,7 +9241,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -9406,7 +9256,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9488,13 +9338,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9515,7 +9364,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -9531,11 +9380,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
@@ -9561,7 +9408,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -9580,11 +9427,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
@@ -9610,7 +9455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -9629,12 +9474,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -9656,7 +9500,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -9674,13 +9518,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9700,7 +9543,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -9717,13 +9560,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9744,7 +9586,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9761,11 +9603,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9789,7 +9630,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -9806,7 +9647,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -9815,7 +9655,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -9830,7 +9670,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -9930,7 +9770,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -9942,40 +9781,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB28_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB28_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_add_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_add_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_add_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_add_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -9988,27 +9826,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB28_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB28_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10022,8 +9859,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -10036,28 +9872,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB28_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v8 :: v_dual_add_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_add_f32 v6, v6, v8 :: v_dual_add_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -10071,14 +9907,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB28_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -10088,13 +9924,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10108,8 +9943,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -10122,28 +9956,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB28_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v9 :: v_dual_add_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_add_f32 v6, v6, v9 :: v_dual_add_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -10157,14 +9991,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB28_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -10174,13 +10008,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
@@ -10192,8 +10025,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB28_1
@@ -10205,25 +10037,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB28_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -10235,15 +10067,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB28_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -10252,13 +10084,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB28_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -10270,38 +10101,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB28_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_add_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_add_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_add_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -10313,27 +10143,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB28_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -10345,8 +10174,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB28_1
; GFX908-NEXT: ; %bb.2:
@@ -10360,24 +10188,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB28_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -10389,27 +10217,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB28_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB28_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -10421,8 +10248,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB28_1
; GFX8-NEXT: ; %bb.2:
@@ -10434,27 +10260,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB28_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_add_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_add_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_add_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -10466,27 +10292,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB28_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB28_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB28_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -10497,36 +10322,35 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB28_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v6
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB28_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB28_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v7
-; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
-; GFX7-NEXT: v_add_f32_e32 v4, v4, v10
-; GFX7-NEXT: v_add_f32_e32 v6, v6, v9
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v6, 16
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v7
-; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_add_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_add_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX7-NEXT: v_alignbit_b32 v6, v7, v6, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v8, v5, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB28_4: ; Parent Loop BB28_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -10538,23 +10362,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB28_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB28_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB28_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
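Across the GFX908/GFX8/GFX7 waterfall hunks above, the pattern is the same: the pre-loop VALU materialization of the 0x400 byte displacement (v_add_u32_e32 / v_add_i32_e32 into a scratch VGPR, plus the ; implicit-def it forced) disappears, and the 1024-byte offset is instead carried as the offset:1024 immediate on both the buffer_load_dword and the buffer_atomic_cmpswap inside the loop, freeing one VGPR and renumbering the temporaries. A minimal sketch of the IR these checks are presumably generated from, reconstructed from the function label and the folded offset; the exact alignment, attributes, and metadata are assumptions, not copied from the test:

  define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory(ptr addrspace(7) %ptr, <2 x bfloat> %val) {
    ; 256 elements x 4 bytes per <2 x bfloat> = the 0x400 bytes now folded into offset:1024
    %gep = getelementptr <2 x bfloat>, ptr addrspace(7) %ptr, i32 256
    %result = atomicrmw fadd ptr addrspace(7) %gep, <2 x bfloat> %val syncscope("agent") seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
    ret <2 x bfloat> %result
  }

  !0 = !{}

Because the fat pointer arrives in VGPRs here, the resource descriptor still has to be made uniform by the v_readfirstlane waterfall; only the offset materialization changes.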
@@ -10658,13 +10482,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -10686,7 +10509,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -10701,12 +10524,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB29_1: ; %atomicrmw.start
@@ -10735,7 +10557,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -10753,10 +10575,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -10787,7 +10608,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -10806,9 +10627,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -10834,7 +10654,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -10852,13 +10672,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -10879,7 +10698,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -10896,13 +10715,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -10924,7 +10742,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -10941,11 +10759,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -10970,7 +10787,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -10986,7 +10803,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -10995,7 +10811,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -11010,7 +10826,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset(ptr add
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
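In the uniform-pointer variants above, the saving is scalar rather than vector: the s_add_i32 s4, s20, 0x400 feeding v_mov_b32 v4, s4 collapses to v_mov_b32 v4, s20 with the displacement moved into the cmpswap's offset:1024 immediate, dropping one SALU add per function and, on GFX11, simplifying the s_delay_alu hints as well. The hunks that follow apply the same folding to the noret sibling, sketched below; the signature and the inreg attribute are assumptions inferred from the SGPR base register in the assembly:

  define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace(7) inreg %ptr, <2 x bfloat> %val) {
    %gep = getelementptr <2 x bfloat>, ptr addrspace(7) %ptr, i32 256
    %unused = atomicrmw fadd ptr addrspace(7) %gep, <2 x bfloat> %val syncscope("agent") seq_cst, align 4
    ret void
  }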
@@ -11092,13 +10908,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11119,7 +10934,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11135,11 +10950,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB30_1: ; %atomicrmw.start
@@ -11165,7 +10978,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -11184,11 +10997,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB30_1: ; %atomicrmw.start
@@ -11214,7 +11025,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -11233,12 +11044,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -11260,7 +11070,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -11278,13 +11088,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -11304,7 +11113,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11321,13 +11130,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -11348,7 +11156,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11365,11 +11173,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -11393,7 +11200,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11410,7 +11217,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -11419,7 +11225,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -11434,7 +11240,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset(ptr addrspace
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -11517,13 +11323,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11545,7 +11350,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -11560,12 +11365,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB31_1: ; %atomicrmw.start
@@ -11594,7 +11398,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -11612,10 +11416,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -11646,7 +11449,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -11665,9 +11468,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -11693,7 +11495,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -11711,13 +11513,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -11738,7 +11539,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -11755,13 +11556,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -11783,7 +11583,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -11800,11 +11600,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -11829,7 +11628,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -11845,7 +11644,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -11854,7 +11652,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -11869,7 +11667,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fadd_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -11951,13 +11749,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -11978,7 +11775,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -11994,11 +11791,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB32_1: ; %atomicrmw.start
@@ -12024,7 +11819,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -12043,11 +11838,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB32_1: ; %atomicrmw.start
@@ -12073,7 +11866,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -12092,12 +11885,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -12119,7 +11911,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12137,13 +11929,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12163,7 +11954,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12180,13 +11971,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12207,7 +11997,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12224,11 +12014,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12252,7 +12041,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12269,7 +12058,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -12278,7 +12066,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -12293,7 +12081,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_re
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -12375,13 +12163,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -12402,7 +12189,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12418,11 +12205,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB33_1: ; %atomicrmw.start
@@ -12448,7 +12233,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -12467,11 +12252,9 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB33_1: ; %atomicrmw.start
@@ -12497,7 +12280,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -12516,12 +12299,11 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -12543,7 +12325,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12561,13 +12343,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12587,7 +12368,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -12604,13 +12385,12 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12631,7 +12411,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12648,11 +12428,10 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12676,7 +12455,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -12693,7 +12472,6 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -12702,7 +12480,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
@@ -12717,7 +12495,7 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
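The __amdgpu_no_remote_memory and __amdgpu_no_fine_grained_memory v2bf16 variants above presumably differ from the plain __offset tests only in the metadata attached to the atomicrmw, and the diff applies the identical offset folding to each. The final hunks below do the same for the scalar f32, system-scope case (no syncscope, hence the extra buffer_wbl2/buffer_invl2 on GFX90A). A hedged sketch of that test's IR, reconstructed from the function label rather than copied from the source:

  define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_grained_memory(ptr addrspace(7) inreg %ptr, float %val) {
    %gep = getelementptr float, ptr addrspace(7) %ptr, i32 256
    %result = atomicrmw fadd ptr addrspace(7) %gep, float %val seq_cst, align 4, !amdgpu.no.fine.grained.memory !0
    ret float %result
  }

  !0 = !{}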
@@ -12825,8 +12603,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB34_1: ; %atomicrmw.start
@@ -12837,7 +12614,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX10-NEXT: v_add_f32_e32 v4, v5, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -12855,9 +12632,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v2, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -12865,7 +12641,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_add_f32_e32 v4, v5, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -12883,9 +12659,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -12893,7 +12668,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_add_f32_e32 v4, v5, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -12910,9 +12685,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12920,7 +12694,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_add_f32_e32 v4, v5, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -12937,9 +12711,8 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: v_mov_b32_e32 v2, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -12947,7 +12720,7 @@ define float @buffer_fat_ptr_system_atomic_fadd_ret_f32__offset__amdgpu_no_fine_
; GFX7-NEXT: v_add_f32_e32 v4, v5, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
index f7a1fb3..1a4140c 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmax.ll
@@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_max_f32_e32 v0, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
; GFX90A-NEXT: v_max_f32_e32 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_max_f32_e32 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_max_f32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: v_mov_b32_e32 v4, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX942-NEXT: v_max_f32_e32 v5, v5, v5
; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB2_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX942-NEXT: v_max_f32_e32 v6, v4, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX942-NEXT: v_max_f32_e32 v8, v6, v5
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: buffer_wbl2 sc1
; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
@@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB2_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5
; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX90A-NEXT: v_max_f32_e32 v6, v4, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX90A-NEXT: v_max_f32_e32 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB2_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_max_f32_e32 v4, v6, v6
-; GFX908-NEXT: v_max_f32_e32 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_max_f32_e32 v5, v7, v7
+; GFX908-NEXT: v_max_f32_e32 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB2_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB2_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6
-; GFX8-NEXT: v_max_f32_e32 v5, v4, v8
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7
+; GFX8-NEXT: v_max_f32_e32 v6, v5, v8
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB2_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: v_max_f32_e32 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_max_f32_e32 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_max_f32_e32 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v1, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_max_f32_e32 v4, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmax_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB5_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB5_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[4:5]
; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
@@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v2
; GFX908-NEXT: v_mov_b32_e32 v8, v1
; GFX908-NEXT: v_mov_b32_e32 v7, v0
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v2
; GFX8-NEXT: v_mov_b32_e32 v8, v1
; GFX8-NEXT: v_mov_b32_e32 v7, v0
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6]
+; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6]
; GFX12-NEXT: s_mov_b32 s1, 0
; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Loop Header: Depth=1
@@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[4:5]
+; GFX12-NEXT: v_max_num_f64_e32 v[11:12], v[0:1], v[5:6]
; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX11-NEXT: .p2align 6
; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Loop Header: Depth=1
@@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX11-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_mov_b64 exec, s[6:7]
-; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Loop Header: Depth=1
@@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX908-NEXT: s_mov_b64 s[12:13], exec
-; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX908-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX908-NEXT: v_mov_b32_e32 v0, v11
; GFX908-NEXT: v_mov_b32_e32 v1, v12
; GFX908-NEXT: v_mov_b32_e32 v2, v13
@@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_mov_b64 exec, s[6:7]
-; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Loop Header: Depth=1
@@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX8-NEXT: s_mov_b64 s[12:13], exec
-; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[4:5]
+; GFX8-NEXT: v_max_f64 v[11:12], v[0:1], v[5:6]
; GFX8-NEXT: v_mov_b32_e32 v0, v11
; GFX8-NEXT: v_mov_b32_e32 v1, v12
; GFX8-NEXT: v_mov_b32_e32 v2, v13
@@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, s20
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v8, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX10-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v3, v5
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, v0
-; GFX90A-NEXT: v_mov_b32_e32 v0, s20
-; GFX90A-NEXT: v_mov_b32_e32 v3, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v2, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v8, s20
; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX90A-NEXT: v_max_f64 v[8:9], v[0:1], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX90A-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, v0
-; GFX7-NEXT: v_mov_b32_e32 v0, s20
-; GFX7-NEXT: v_mov_b32_e32 v3, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
-; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v8, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX7-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX7-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v2
+; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_mov_b32_e32 v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remot
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_remote_memory:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v3, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
-; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v8, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX6-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX6-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX6-NEXT: v_mov_b32_e32 v0, v2
+; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_mov_b32_e32 v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v3, v5
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_max_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_max_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB9_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_max_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_max_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB9_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
+; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1
; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: v_pk_max_num_f16 v4, v0, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
@@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_pk_max_f16 v4, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_pk_max_f16 v2, v1, v1
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: v_pk_max_f16 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_pk_max_f16 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_max_f16 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5
; GFX90A-NEXT: v_pk_max_f16 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_max_f16 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v1, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v5, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_pk_max_num_f16 v0, v0, v2
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
@@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: v_pk_max_f16 v0, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_pk_max_f16 v0, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mov_b32_e32 v4, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1
; GFX90A-NEXT: v_pk_max_f16 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: v_pk_max_f16 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
+; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_1
; GFX12-NEXT: ; %bb.2:
@@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: ; =>This Loop Header: Depth=1
; GFX12-NEXT: ; Child Loop BB18_4 Depth 2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6
+; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v5, v4, v8
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-NEXT: v_pk_max_num_f16 v6, v5, v8
; GFX12-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-NEXT: v_mov_b32_e32 v6, v7
; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX12-NEXT: s_mov_b32 exec_lo, s2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-NEXT: v_mov_b32_e32 v7, v5
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_cbranch_execnz .LBB18_3
; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: v_mov_b32_e32 v0, v5
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX942-NEXT: v_pk_max_f16 v5, v5, v5
; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB18_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v4, v7, v7
+; GFX942-NEXT: v_pk_max_f16 v6, v9, v9
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_pk_max_f16 v6, v4, v9
+; GFX942-NEXT: v_pk_max_f16 v8, v6, v5
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB18_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
+; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_1
; GFX11-NEXT: ; %bb.2:
@@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: ; =>This Loop Header: Depth=1
; GFX11-NEXT: ; Child Loop BB18_4 Depth 2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX11-NEXT: v_pk_max_f16 v5, v7, v7
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-NEXT: v_pk_max_f16 v6, v5, v8
; GFX11-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-NEXT: v_mov_b32_e32 v6, v7
; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
@@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX11-NEXT: s_mov_b32 exec_lo, s2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-NEXT: v_mov_b32_e32 v7, v5
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_cbranch_execnz .LBB18_3
; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_1
@@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB18_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX10-NEXT: v_pk_max_f16 v5, v7, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_pk_max_f16 v6, v5, v8
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_cbranch_execnz .LBB18_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5
; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX90A-NEXT: v_pk_max_f16 v6, v4, v9
+; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9
+; GFX90A-NEXT: v_pk_max_f16 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_1
; GFX908-NEXT: ; %bb.2:
@@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB18_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX908-NEXT: v_pk_max_f16 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_pk_max_f16 v5, v7, v7
+; GFX908-NEXT: v_pk_max_f16 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB18_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_1
; GFX8-NEXT: ; %bb.2:
@@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB18_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f16_e32 v5, v5, v9
-; GFX8-NEXT: v_or_b32_e32 v5, v5, v4
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v6, v7, v7
+; GFX8-NEXT: v_max_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f16_e32 v6, v6, v9
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v5
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB18_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB18_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_max_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_max_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_max_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_max_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmax_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB18_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
@@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
@@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-TRUE16-NEXT: ; %bb.2:
@@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v8 :: v_dual_max_num_f32 v4, v4, v9
-; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v6, v6, v8 :: v_dual_max_num_f32 v5, v5, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-FAKE16-NEXT: ; %bb.2:
@@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v9 :: v_dual_max_num_f32 v4, v4, v8
-; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v6, v6, v9 :: v_dual_max_num_f32 v5, v5, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB21_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_max_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_max_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB21_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v8 :: v_dual_max_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_max_f32 v6, v6, v8 :: v_dual_max_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v9 :: v_dual_max_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_max_f32 v6, v6, v9 :: v_dual_max_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB21_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB21_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_max_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_max_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB21_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB21_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_max_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_max_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_max_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB21_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
@@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7
-; GFX7-NEXT: v_max_f32_e32 v4, v4, v9
-; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8
+; GFX7-NEXT: v_max_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_max_f32_e32 v7, v7, v10
-; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_max_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmax_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_max_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc0 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_max_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_max_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmax_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_max_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
index 8ac6353..671f42c 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fmin.ll
@@ -37,10 +37,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -49,7 +48,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -88,10 +87,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -99,7 +97,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -116,10 +114,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -128,7 +125,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -145,10 +142,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB0_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -157,7 +153,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -212,10 +208,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -223,7 +218,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_min_f32_e32 v0, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -262,17 +257,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_max_f32_e32 v0, v1, v1
; GFX90A-NEXT: v_min_f32_e32 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -289,10 +283,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -300,7 +293,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_min_f32_e32 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -317,10 +310,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v0
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB1_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -328,7 +320,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_min_f32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: v_mov_b32_e32 v4, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -402,7 +394,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -414,22 +405,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX942-NEXT: v_max_f32_e32 v5, v5, v5
; GFX942-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB2_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX942-NEXT: v_min_f32_e32 v6, v4, v9
+; GFX942-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX942-NEXT: v_min_f32_e32 v8, v6, v5
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: buffer_wbl2 sc1
; GFX942-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
@@ -443,21 +433,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB2_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -522,7 +512,6 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -534,22 +523,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_max_f32_e32 v9, v5, v5
+; GFX90A-NEXT: v_max_f32_e32 v5, v5, v5
; GFX90A-NEXT: .LBB2_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB2_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_max_f32_e32 v4, v7, v7
-; GFX90A-NEXT: v_min_f32_e32 v6, v4, v9
+; GFX90A-NEXT: v_max_f32_e32 v6, v9, v9
+; GFX90A-NEXT: v_min_f32_e32 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -561,27 +549,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -593,8 +580,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2:
@@ -605,11 +591,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB2_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_max_f32_e32 v4, v6, v6
-; GFX908-NEXT: v_min_f32_e32 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_max_f32_e32 v5, v7, v7
+; GFX908-NEXT: v_min_f32_e32 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -621,27 +607,26 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB2_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -653,8 +638,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2:
@@ -665,11 +649,11 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB2_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mul_f32_e32 v4, 1.0, v6
-; GFX8-NEXT: v_min_f32_e32 v5, v4, v8
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mul_f32_e32 v5, 1.0, v7
+; GFX8-NEXT: v_min_f32_e32 v6, v5, v8
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB2_4: ; Parent Loop BB2_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -681,21 +665,21 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgp
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB2_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB2_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -777,10 +761,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -789,7 +772,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -804,11 +787,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_max_f32 v2, v1, v1
-; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s16 :: v_dual_max_f32 v2, v1, v1
+; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -819,7 +801,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX11-NEXT: v_min_f32_e32 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -837,11 +819,10 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_max_f32_e32 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -851,7 +832,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX10-NEXT: v_min_f32_e32 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -869,10 +850,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -880,7 +860,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -897,10 +877,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -909,7 +888,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -926,10 +905,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -938,7 +916,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -955,10 +933,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_mov_b32_e32 v1, v0
; GFX7-NEXT: v_mov_b32_e32 v0, s20
; GFX7-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX7-NEXT: v_mov_b32_e32 v3, s6
+; GFX7-NEXT: v_mov_b32_e32 v3, s20
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -967,7 +944,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_remote
; GFX7-NEXT: v_min_f32_e32 v4, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v0, v4
; GFX7-NEXT: v_mov_b32_e32 v1, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1035,10 +1012,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -1047,7 +1023,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1086,10 +1062,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -1097,7 +1072,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX90A-NEXT: v_max_f32_e32 v0, v5, v5
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1114,10 +1089,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1126,7 +1100,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1143,10 +1117,9 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB4_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1155,7 +1128,7 @@ define float @buffer_fat_ptr_agent_atomic_fmin_ret_f32__offset__amdgpu_no_fine_g
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -1201,29 +1174,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1246,30 +1217,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -1301,30 +1270,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB5_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1334,30 +1300,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB5_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB5_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1397,11 +1360,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v2, s16
; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[0:1], v[0:1]
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v6, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -1411,7 +1372,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[4:5]
; GFX12-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX12-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
@@ -1440,11 +1401,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v2, s16
; GFX11-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v6, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[2:3], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -1454,7 +1413,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX11-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX11-NEXT: v_dual_mov_b32 v10, v3 :: v_dual_mov_b32 v9, v2
; GFX11-NEXT: v_dual_mov_b32 v8, v1 :: v_dual_mov_b32 v7, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[7:10], v6, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -1494,9 +1453,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v2, s20
; GFX908-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX908-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v6, s20
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -1506,7 +1464,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v9, v2
; GFX908-NEXT: v_mov_b32_e32 v8, v1
; GFX908-NEXT: v_mov_b32_e32 v7, v0
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1525,9 +1483,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v2, s20
; GFX8-NEXT: buffer_load_dwordx2 v[2:3], v2, s[16:19], 0 offen offset:2048
; GFX8-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v6, s20
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -1537,7 +1494,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v9, v2
; GFX8-NEXT: v_mov_b32_e32 v8, v1
; GFX8-NEXT: v_mov_b32_e32 v7, v0
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[7:10], v6, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[7:8], v[2:3]
@@ -1583,10 +1540,9 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX12-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-NEXT: v_readfirstlane_b32 s4, v9
; GFX12-NEXT: v_readfirstlane_b32 s5, v10
; GFX12-NEXT: v_readfirstlane_b32 s6, v7
@@ -1600,12 +1556,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], null offen offset:2048
-; GFX12-NEXT: ; implicit-def: $vgpr4
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_1
; GFX12-NEXT: ; %bb.2:
; GFX12-NEXT: s_mov_b32 exec_lo, s1
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[5:6], v[5:6]
+; GFX12-NEXT: v_max_num_f64_e32 v[5:6], v[5:6], v[5:6]
; GFX12-NEXT: s_mov_b32 s1, 0
; GFX12-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Loop Header: Depth=1
@@ -1615,7 +1570,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[4:5]
+; GFX12-NEXT: v_min_num_f64_e32 v[11:12], v[0:1], v[5:6]
; GFX12-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX12-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX12-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1632,7 +1587,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB7_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1686,27 +1641,26 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v7, v2
; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v15, 0x800, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s4, v9
; GFX11-NEXT: v_readfirstlane_b32 s5, v10
; GFX11-NEXT: v_readfirstlane_b32 s6, v7
; GFX11-NEXT: v_readfirstlane_b32 s7, v8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[9:10]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[7:8]
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: buffer_load_b64 v[13:14], v4, s[4:7], 0 offen offset:2048
-; GFX11-NEXT: ; implicit-def: $vgpr4
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_1
; GFX11-NEXT: ; %bb.2:
; GFX11-NEXT: s_mov_b32 exec_lo, s2
-; GFX11-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX11-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX11-NEXT: .p2align 6
; GFX11-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Loop Header: Depth=1
@@ -1716,7 +1670,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX11-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX11-NEXT: v_dual_mov_b32 v0, v11 :: v_dual_mov_b32 v1, v12
; GFX11-NEXT: v_dual_mov_b32 v2, v13 :: v_dual_mov_b32 v3, v14
; GFX11-NEXT: .LBB7_4: ; Parent Loop BB7_3 Depth=1
@@ -1732,7 +1686,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v15, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v4, s[4:7], 0 offen offset:2048 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB7_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1816,7 +1770,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: v_mov_b32_e32 v7, v2
; GFX908-NEXT: v_mov_b32_e32 v10, v1
; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_add_u32_e32 v15, 0x800, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v9
@@ -1829,12 +1782,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
; GFX908-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX908-NEXT: ; implicit-def: $vgpr4
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2:
; GFX908-NEXT: s_mov_b64 exec, s[6:7]
-; GFX908-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX908-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Loop Header: Depth=1
@@ -1842,7 +1794,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX908-NEXT: s_mov_b64 s[12:13], exec
-; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX908-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX908-NEXT: v_mov_b32_e32 v0, v11
; GFX908-NEXT: v_mov_b32_e32 v1, v12
; GFX908-NEXT: v_mov_b32_e32 v2, v13
@@ -1858,7 +1810,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -1882,7 +1834,6 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: v_mov_b32_e32 v7, v2
; GFX8-NEXT: v_mov_b32_e32 v10, v1
; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_add_u32_e32 v15, vcc, 0x800, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB7_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v9
@@ -1895,12 +1846,11 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: buffer_load_dwordx2 v[13:14], v4, s[8:11], 0 offen offset:2048
-; GFX8-NEXT: ; implicit-def: $vgpr4
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2:
; GFX8-NEXT: s_mov_b64 exec, s[6:7]
-; GFX8-NEXT: v_max_f64 v[4:5], v[5:6], v[5:6]
+; GFX8-NEXT: v_max_f64 v[5:6], v[5:6], v[5:6]
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: .LBB7_3: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Loop Header: Depth=1
@@ -1908,7 +1858,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_max_f64 v[0:1], v[13:14], v[13:14]
; GFX8-NEXT: s_mov_b64 s[12:13], exec
-; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[4:5]
+; GFX8-NEXT: v_min_f64 v[11:12], v[0:1], v[5:6]
; GFX8-NEXT: v_mov_b32_e32 v0, v11
; GFX8-NEXT: v_mov_b32_e32 v1, v12
; GFX8-NEXT: v_mov_b32_e32 v2, v13
@@ -1924,7 +1874,7 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__waterfall__amdg
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v15, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v4, s[8:11], 0 offen offset:2048 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB7_3 Depth=1
@@ -2008,29 +1958,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2053,30 +2001,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2088,31 +2034,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v2, v0
-; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: v_mov_b32_e32 v3, v1
-; GFX10-NEXT: s_add_i32 s4, s20, 0x800
-; GFX10-NEXT: v_mov_b32_e32 v6, s4
-; GFX10-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX10-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX10-NEXT: v_mov_b32_e32 v2, s20
+; GFX10-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX10-NEXT: v_mov_b32_e32 v8, s20
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_mov_b32_e32 v10, v1
-; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX10-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX10-NEXT: v_mov_b32_e32 v0, v7
-; GFX10-NEXT: v_mov_b32_e32 v1, v8
-; GFX10-NEXT: v_mov_b32_e32 v2, v9
-; GFX10-NEXT: v_mov_b32_e32 v3, v10
-; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX10-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v3, v5
+; GFX10-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX10-NEXT: v_mov_b32_e32 v5, v1
+; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB8_1
@@ -2123,27 +2066,24 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_mov_b32_e32 v2, v0
-; GFX90A-NEXT: v_mov_b32_e32 v0, s20
-; GFX90A-NEXT: v_mov_b32_e32 v3, v1
-; GFX90A-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x800
+; GFX90A-NEXT: v_mov_b32_e32 v2, s20
+; GFX90A-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
-; GFX90A-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX90A-NEXT: v_mov_b32_e32 v6, s6
+; GFX90A-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX90A-NEXT: v_mov_b32_e32 v8, s20
; GFX90A-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_mov_b32 v[10:11], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX90A-NEXT: v_min_f64 v[8:9], v[0:1], v[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[8:9], v[8:9] op_sel:[0,1]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[10:11], v[10:11] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX90A-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX90A-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
+; GFX90A-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB8_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2153,30 +2093,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB8_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2186,30 +2123,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB8_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2219,30 +2153,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v2, v0
-; GFX7-NEXT: v_mov_b32_e32 v0, s20
-; GFX7-NEXT: v_mov_b32_e32 v3, v1
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX7-NEXT: s_add_i32 s6, s20, 0x800
-; GFX7-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
+; GFX7-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX7-NEXT: s_mov_b64 s[4:5], 0
-; GFX7-NEXT: v_mov_b32_e32 v6, s6
+; GFX7-NEXT: v_mov_b32_e32 v8, s20
; GFX7-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v10, v1
-; GFX7-NEXT: v_mov_b32_e32 v9, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX7-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v8
-; GFX7-NEXT: v_mov_b32_e32 v2, v9
-; GFX7-NEXT: v_mov_b32_e32 v3, v10
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX7-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX7-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX7-NEXT: v_mov_b32_e32 v0, v2
+; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_mov_b32_e32 v2, v4
+; GFX7-NEXT: v_mov_b32_e32 v3, v5
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v5, v1
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v4, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB8_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2252,31 +2183,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remot
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_remote_memory:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v2, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s20
-; GFX6-NEXT: v_mov_b32_e32 v3, v1
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
+; GFX6-NEXT: v_mov_b32_e32 v2, s20
+; GFX6-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
; GFX6-NEXT: s_add_i32 s6, s20, 0x800
-; GFX6-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX6-NEXT: s_mov_b64 s[4:5], 0
-; GFX6-NEXT: v_mov_b32_e32 v6, s6
+; GFX6-NEXT: v_mov_b32_e32 v8, s6
; GFX6-NEXT: .LBB8_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v10, v1
-; GFX6-NEXT: v_mov_b32_e32 v9, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX6-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, v7
-; GFX6-NEXT: v_mov_b32_e32 v1, v8
-; GFX6-NEXT: v_mov_b32_e32 v2, v9
-; GFX6-NEXT: v_mov_b32_e32 v3, v10
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX6-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX6-NEXT: v_mov_b32_e32 v0, v2
+; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_mov_b32_e32 v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v3, v5
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v5, v1
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB8_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2296,29 +2224,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX12-NEXT: v_mov_b32_e32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x800
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v6, s4
-; GFX12-NEXT: v_max_num_f64_e32 v[4:5], v[2:3], v[2:3]
-; GFX12-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], null offen offset:2048
+; GFX12-NEXT: v_mov_b32_e32 v2, s16
+; GFX12-NEXT: v_max_num_f64_e32 v[6:7], v[0:1], v[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v8, s16
; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null offen offset:2048
; GFX12-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_max_num_f64_e32 v[0:1], v[9:10], v[9:10]
-; GFX12-NEXT: v_min_num_f64_e32 v[7:8], v[0:1], v[4:5]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX12-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: v_min_num_f64_e32 v[2:3], v[0:1], v[6:7]
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_mov_b32_e32 v3, v5
+; GFX12-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], null offen offset:2048 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2341,30 +2267,28 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x800
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v6, s4
-; GFX11-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX11-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen offset:2048
+; GFX11-NEXT: v_mov_b32_e32 v2, s16
+; GFX11-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v8, s16
; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], 0 offen offset:2048
; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v10, v1 :: v_dual_mov_b32 v9, v0
+; GFX11-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX11-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v7 :: v_dual_mov_b32 v1, v8
-; GFX11-NEXT: v_dual_mov_b32 v2, v9 :: v_dual_mov_b32 v3, v10
-; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v6, s[0:3], 0 offen glc
+; GFX11-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: buffer_atomic_cmpswap_b64 v[0:3], v8, s[0:3], 0 offen offset:2048 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[9:10]
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
+; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
@@ -2396,30 +2320,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v2, v0
-; GFX908-NEXT: v_mov_b32_e32 v0, s20
-; GFX908-NEXT: v_mov_b32_e32 v3, v1
-; GFX908-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX908-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX908-NEXT: s_add_i32 s6, s20, 0x800
+; GFX908-NEXT: v_mov_b32_e32 v2, s20
+; GFX908-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX908-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX908-NEXT: s_mov_b64 s[4:5], 0
-; GFX908-NEXT: v_mov_b32_e32 v6, s6
+; GFX908-NEXT: v_mov_b32_e32 v8, s20
; GFX908-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_mov_b32_e32 v10, v1
-; GFX908-NEXT: v_mov_b32_e32 v9, v0
-; GFX908-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX908-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v0, v7
-; GFX908-NEXT: v_mov_b32_e32 v1, v8
-; GFX908-NEXT: v_mov_b32_e32 v2, v9
-; GFX908-NEXT: v_mov_b32_e32 v3, v10
-; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX908-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX908-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX908-NEXT: v_mov_b32_e32 v0, v2
+; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_mov_b32_e32 v2, v4
+; GFX908-NEXT: v_mov_b32_e32 v3, v5
+; GFX908-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX908-NEXT: v_mov_b32_e32 v4, v0
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB9_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2429,30 +2350,27 @@ define double @buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_f64__offset__amdgpu_no_fine_grained_memory__amdgpu_no_remote_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v2, v0
-; GFX8-NEXT: v_mov_b32_e32 v0, s20
-; GFX8-NEXT: v_mov_b32_e32 v3, v1
-; GFX8-NEXT: buffer_load_dwordx2 v[0:1], v0, s[16:19], 0 offen offset:2048
-; GFX8-NEXT: v_max_f64 v[4:5], v[2:3], v[2:3]
-; GFX8-NEXT: s_add_i32 s6, s20, 0x800
+; GFX8-NEXT: v_mov_b32_e32 v2, s20
+; GFX8-NEXT: buffer_load_dwordx2 v[4:5], v2, s[16:19], 0 offen offset:2048
+; GFX8-NEXT: v_max_f64 v[6:7], v[0:1], v[0:1]
; GFX8-NEXT: s_mov_b64 s[4:5], 0
-; GFX8-NEXT: v_mov_b32_e32 v6, s6
+; GFX8-NEXT: v_mov_b32_e32 v8, s20
; GFX8-NEXT: .LBB9_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v10, v1
-; GFX8-NEXT: v_mov_b32_e32 v9, v0
-; GFX8-NEXT: v_max_f64 v[0:1], v[9:10], v[9:10]
-; GFX8-NEXT: v_min_f64 v[7:8], v[0:1], v[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v0, v7
-; GFX8-NEXT: v_mov_b32_e32 v1, v8
-; GFX8-NEXT: v_mov_b32_e32 v2, v9
-; GFX8-NEXT: v_mov_b32_e32 v3, v10
-; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v6, s[16:19], 0 offen glc
+; GFX8-NEXT: v_max_f64 v[0:1], v[4:5], v[4:5]
+; GFX8-NEXT: v_min_f64 v[2:3], v[0:1], v[6:7]
+; GFX8-NEXT: v_mov_b32_e32 v0, v2
+; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_mov_b32_e32 v2, v4
+; GFX8-NEXT: v_mov_b32_e32 v3, v5
+; GFX8-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v8, s[16:19], 0 offen offset:2048 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[9:10]
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v5, v1
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB9_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6146,13 +6064,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
+; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX12-NEXT: v_pk_max_num_f16 v2, v1, v1
; GFX12-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
-; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_loadcnt 0x0
@@ -6163,7 +6079,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX12-NEXT: v_pk_min_num_f16 v4, v0, v2
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v5
@@ -6182,10 +6098,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6195,7 +6110,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX942-NEXT: v_pk_min_f16 v4, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6210,12 +6125,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_pk_max_f16 v2, v1, v1
; GFX11-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
-; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -6226,7 +6140,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX11-NEXT: v_pk_min_f16 v4, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6244,11 +6158,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_pk_max_f16 v2, v1, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -6258,7 +6171,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX10-NEXT: v_pk_min_f16 v4, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v0, v4
; GFX10-NEXT: v_mov_b32_e32 v1, v5
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6276,10 +6189,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6287,7 +6199,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX90A-NEXT: v_pk_max_f16 v0, v5, v5
; GFX90A-NEXT: v_pk_min_f16 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6304,10 +6216,9 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6316,7 +6227,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX908-NEXT: v_pk_min_f16 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -6333,11 +6244,10 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v1, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6349,7 +6259,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: v_or_b32_e32 v5, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -6367,7 +6277,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: buffer_load_dword v3, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6375,7 +6284,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v3
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
@@ -6392,7 +6301,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__amdgpu_no
; GFX7-NEXT: v_or_b32_e32 v5, v7, v0
; GFX7-NEXT: v_mov_b32_e32 v8, v6
; GFX7-NEXT: v_mov_b32_e32 v7, v5
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v7
@@ -6467,10 +6376,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mov_b32_e32 v1, s16
-; GFX12-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-NEXT: v_pk_max_num_f16 v2, v0, v0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: v_mov_b32_e32 v3, s4
+; GFX12-NEXT: v_mov_b32_e32 v3, s16
; GFX12-NEXT: s_mov_b32 s4, 0
; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6481,7 +6388,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_pk_min_num_f16 v0, v0, v2
; GFX12-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v1
@@ -6500,10 +6407,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6512,7 +6418,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX942-NEXT: v_pk_min_f16 v0, v0, v2
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6528,9 +6434,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v1, s16
-; GFX11-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX11-NEXT: v_mov_b32_e32 v3, s4
+; GFX11-NEXT: v_mov_b32_e32 v3, s16
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6541,7 +6446,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
; GFX11-NEXT: v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v3, s[0:3], 0 offen offset:1024 glc
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
@@ -6559,9 +6464,8 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX10-NEXT: v_mov_b32_e32 v3, s4
+; GFX10-NEXT: v_mov_b32_e32 v3, s20
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
@@ -6572,7 +6476,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX10-NEXT: v_pk_min_f16 v0, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v5, v1
; GFX10-NEXT: v_mov_b32_e32 v4, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -6590,17 +6494,16 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_pk_max_f16 v0, v1, v1
; GFX90A-NEXT: v_pk_min_f16 v0, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6617,10 +6520,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_pk_max_f16 v2, v0, v0
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -6628,7 +6530,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX908-NEXT: v_pk_min_f16 v0, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v5, v1
; GFX908-NEXT: v_mov_b32_e32 v4, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v1
@@ -6645,11 +6547,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_max_f16_sdwa v2, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_max_f16_e32 v3, v0, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s6
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -6660,7 +6561,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -6679,7 +6580,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v0
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: s_mov_b64 s[4:5], 0
; GFX7-NEXT: v_cvt_f32_f16_e32 v0, v1
; GFX7-NEXT: s_waitcnt vmcnt(0)
@@ -6687,7 +6587,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v2
; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v1
; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v5
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
@@ -6704,7 +6604,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2f16__offset__amdgpu_no_fin
; GFX7-NEXT: v_or_b32_e32 v4, v6, v3
; GFX7-NEXT: v_mov_b32_e32 v7, v5
; GFX7-NEXT: v_mov_b32_e32 v6, v4
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
@@ -6778,7 +6678,6 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_samplecnt 0x0
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-NEXT: s_mov_b32 s1, exec_lo
; GFX12-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6793,8 +6692,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-NEXT: ; implicit-def: $vgpr4
+; GFX12-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_1
; GFX12-NEXT: ; %bb.2:
@@ -6805,13 +6703,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: ; =>This Loop Header: Depth=1
; GFX12-NEXT: ; Child Loop BB18_4 Depth 2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v4, v6, v6
+; GFX12-NEXT: v_pk_max_num_f16 v5, v7, v7
; GFX12-NEXT: s_mov_b32 s2, exec_lo
; GFX12-NEXT: s_wait_storecnt 0x0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v5, v4, v8
-; GFX12-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-NEXT: v_pk_min_num_f16 v6, v5, v8
; GFX12-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-NEXT: v_mov_b32_e32 v6, v7
; GFX12-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX12-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-NEXT: v_readfirstlane_b32 s4, v0
@@ -6826,14 +6724,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-NEXT: s_cbranch_execnz .LBB18_4
; GFX12-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX12-NEXT: s_mov_b32 exec_lo, s2
; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-NEXT: v_mov_b32_e32 v7, v5
; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6841,14 +6739,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX12-NEXT: s_cbranch_execnz .LBB18_3
; GFX12-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: v_mov_b32_e32 v0, v5
; GFX12-NEXT: s_wait_loadcnt 0x0
; GFX12-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6860,23 +6757,22 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX942-NEXT: v_pk_max_f16 v5, v5, v5
; GFX942-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB18_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v4, v7, v7
+; GFX942-NEXT: v_pk_max_f16 v6, v9, v9
; GFX942-NEXT: s_mov_b64 s[8:9], exec
-; GFX942-NEXT: v_pk_min_f16 v6, v4, v9
+; GFX942-NEXT: v_pk_min_f16 v8, v6, v5
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -6889,27 +6785,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB18_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB18_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6923,8 +6818,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-NEXT: ; implicit-def: $vgpr4
+; GFX11-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_1
; GFX11-NEXT: ; %bb.2:
@@ -6935,13 +6829,13 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: ; =>This Loop Header: Depth=1
; GFX11-NEXT: ; Child Loop BB18_4 Depth 2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX11-NEXT: v_pk_max_f16 v5, v7, v7
; GFX11-NEXT: s_mov_b32 s2, exec_lo
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX11-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-NEXT: v_pk_min_f16 v6, v5, v8
; GFX11-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-NEXT: v_mov_b32_e32 v6, v7
; GFX11-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX11-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
@@ -6955,14 +6849,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_execnz .LBB18_4
; GFX11-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX11-NEXT: s_mov_b32 exec_lo, s2
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-NEXT: v_mov_b32_e32 v7, v5
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -6971,13 +6865,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX11-NEXT: s_cbranch_execnz .LBB18_3
; GFX11-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-NEXT: v_mov_b32_e32 v0, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
@@ -6989,8 +6882,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_1
@@ -7001,12 +6893,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB18_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v4, v6, v6
+; GFX10-NEXT: v_pk_max_f16 v5, v7, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_pk_min_f16 v6, v5, v8
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -7018,15 +6910,15 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB18_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -7035,13 +6927,12 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX10-NEXT: s_cbranch_execnz .LBB18_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7053,22 +6944,21 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_pk_max_f16 v9, v5, v5
+; GFX90A-NEXT: v_pk_max_f16 v5, v5, v5
; GFX90A-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB18_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v4, v7, v7
-; GFX90A-NEXT: v_pk_min_f16 v6, v4, v9
+; GFX90A-NEXT: v_pk_max_f16 v6, v9, v9
+; GFX90A-NEXT: v_pk_min_f16 v8, v6, v5
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -7080,27 +6970,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB18_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7112,8 +7001,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_1
; GFX908-NEXT: ; %bb.2:
@@ -7124,11 +7012,11 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB18_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v4, v6, v6
-; GFX908-NEXT: v_pk_min_f16 v5, v4, v8
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_pk_max_f16 v5, v7, v7
+; GFX908-NEXT: v_pk_min_f16 v6, v5, v8
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -7140,27 +7028,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB18_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB18_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7172,8 +7059,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_1
; GFX8-NEXT: ; %bb.2:
@@ -7185,14 +7071,14 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB18_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v6, v6
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f16_e32 v5, v5, v9
-; GFX8-NEXT: v_or_b32_e32 v5, v5, v4
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_max_f16_sdwa v5, v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v6, v7, v7
+; GFX8-NEXT: v_min_f16_sdwa v5, v5, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f16_e32 v6, v6, v9
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v5
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -7204,27 +7090,26 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB18_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB18_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v9, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB18_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7235,39 +7120,38 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2:
; GFX7-NEXT: s_mov_b64 exec, s[6:7]
; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v6
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v9, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
; GFX7-NEXT: v_cvt_f32_f16_e32 v10, v6
-; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v11, v9
; GFX7-NEXT: s_mov_b64 s[6:7], 0
; GFX7-NEXT: .LBB18_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB18_4 Depth 2
; GFX7-NEXT: v_cvt_f16_f32_e32 v5, v5
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
+; GFX7-NEXT: v_cvt_f16_f32_e32 v6, v7
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_cvt_f32_f16_e32 v6, v5
-; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX7-NEXT: v_cvt_f32_f16_e32 v8, v6
; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_min_f32_e32 v6, v6, v10
-; GFX7-NEXT: v_min_f32_e32 v7, v7, v11
-; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v6
+; GFX7-NEXT: v_or_b32_e32 v6, v6, v5
+; GFX7-NEXT: v_min_f32_e32 v7, v7, v10
+; GFX7-NEXT: v_min_f32_e32 v8, v8, v11
; GFX7-NEXT: v_cvt_f16_f32_e32 v7, v7
-; GFX7-NEXT: v_or_b32_e32 v6, v4, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; GFX7-NEXT: v_or_b32_e32 v5, v7, v4
-; GFX7-NEXT: v_mov_b32_e32 v8, v6
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_cvt_f16_f32_e32 v8, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX7-NEXT: v_or_b32_e32 v5, v8, v5
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB18_4: ; Parent Loop BB18_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -7279,23 +7163,23 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fmin_ret_v2f16__offset__waterfall
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[7:8], v9, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB18_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB18_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v7
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; GFX7-NEXT: v_cvt_f32_f16_e32 v7, v8
; GFX7-NEXT: v_cvt_f32_f16_e32 v5, v5
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v7, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB18_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v7
; GFX7-NEXT: v_mov_b32_e32 v1, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -7396,13 +7280,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
+; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX12-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7431,7 +7313,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7452,11 +7334,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX12-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7487,7 +7367,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX12-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v0, v6
@@ -7506,13 +7386,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -7534,7 +7413,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX942-NEXT: v_cndmask_b32_e64 v0, v5, v6, s[4:5]
; GFX942-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[6:7]
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7549,12 +7428,11 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v1
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v1
; GFX11-TRUE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v1
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
@@ -7583,7 +7461,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v0.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -7601,10 +7479,9 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, s16
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v1
; GFX11-FAKE16-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
@@ -7635,7 +7512,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX11-FAKE16-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, v5 :: v_dual_mov_b32 v1, v6
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[0:1], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -7654,9 +7531,8 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v0, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX10-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
@@ -7682,7 +7558,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX10-NEXT: v_perm_b32 v5, v1, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -7700,13 +7576,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -7727,7 +7602,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v6, v1, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[6:7], v[6:7] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v0, v7
@@ -7744,13 +7619,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -7772,7 +7646,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX908-NEXT: v_perm_b32 v5, v1, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: v_mov_b32_e32 v1, v6
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7789,11 +7663,10 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -7818,7 +7691,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX8-NEXT: v_alignbit_b32 v5, v1, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: v_mov_b32_e32 v1, v6
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v6
@@ -7834,7 +7707,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v4, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -7843,7 +7715,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_mov_b32_e32 v4, s20
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -7858,7 +7730,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__amdgpu
; GFX7-NEXT: v_alignbit_b32 v0, v0, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v1
; GFX7-NEXT: v_mov_b32_e32 v5, v0
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -7928,11 +7800,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX12-TRUE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
-; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
@@ -7958,7 +7828,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -7980,11 +7850,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX12-FAKE16-NEXT: s_add_co_i32 s4, s16, 0x400
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
-; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX12-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], null offen offset:1024
; GFX12-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
@@ -8010,7 +7878,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX12-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v1
@@ -8029,13 +7897,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s16
; GFX942-NEXT: buffer_load_dword v1, v1, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s4, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[6:7], 0
; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX942-NEXT: s_movk_i32 s8, 0x7fff
; GFX942-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX942-NEXT: s_mov_b32 s9, 0x7060302
-; GFX942-NEXT: v_mov_b32_e32 v4, s4
+; GFX942-NEXT: v_mov_b32_e32 v4, s16
; GFX942-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -8056,7 +7923,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX942-NEXT: v_cndmask_b32_e64 v0, v6, v7, s[4:5]
; GFX942-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8072,11 +7939,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_and_b32 v2, 0xffff0000, v0
-; GFX11-TRUE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_lshlrev_b32 v3, 16, v0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_lshlrev_b32 v3, 16, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8102,7 +7967,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v6.h
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
@@ -8121,11 +7986,9 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v1, s16 :: v_dual_lshlrev_b32 v2, 16, v0
-; GFX11-FAKE16-NEXT: s_add_i32 s4, s16, 0x400
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_and_b32 v3, 0xffff0000, v0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_and_b32 v3, 0xffff0000, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s5, 0
+; GFX11-FAKE16-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x1
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB20_1: ; %atomicrmw.start
@@ -8151,7 +8014,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, v1 :: v_dual_mov_b32 v5, v0
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[0:3], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
@@ -8170,12 +8033,11 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v1, s20
-; GFX10-NEXT: s_add_i32 s4, s20, 0x400
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX10-NEXT: v_mov_b32_e32 v4, s4
-; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
+; GFX10-NEXT: v_mov_b32_e32 v4, s20
; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -8197,7 +8059,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX10-NEXT: v_perm_b32 v0, v5, v0, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v6, v1
; GFX10-NEXT: v_mov_b32_e32 v5, v0
-; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
@@ -8215,13 +8077,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_mov_b32_e32 v1, s20
; GFX90A-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s4, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX90A-NEXT: s_movk_i32 s8, 0x7fff
; GFX90A-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX90A-NEXT: s_mov_b32 s9, 0x7060302
-; GFX90A-NEXT: v_mov_b32_e32 v4, s4
+; GFX90A-NEXT: v_mov_b32_e32 v4, s20
; GFX90A-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -8241,7 +8102,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
; GFX90A-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[0:1], v[0:1] op_sel:[0,1]
-; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v1
@@ -8258,13 +8119,12 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX908-NEXT: v_mov_b32_e32 v1, s20
; GFX908-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s4, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[6:7], 0
; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX908-NEXT: s_movk_i32 s8, 0x7fff
; GFX908-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
; GFX908-NEXT: s_mov_b32 s9, 0x7060302
-; GFX908-NEXT: v_mov_b32_e32 v4, s4
+; GFX908-NEXT: v_mov_b32_e32 v4, s20
; GFX908-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -8285,7 +8145,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX908-NEXT: v_perm_b32 v0, v5, v0, s9
; GFX908-NEXT: v_mov_b32_e32 v6, v1
; GFX908-NEXT: v_mov_b32_e32 v5, v0
-; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8302,11 +8162,10 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, s20
; GFX8-NEXT: buffer_load_dword v1, v1, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s4, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[6:7], 0
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v0
-; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: v_mov_b32_e32 v4, s20
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -8330,7 +8189,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX8-NEXT: v_alignbit_b32 v0, v5, v0, 16
; GFX8-NEXT: v_mov_b32_e32 v6, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v1
@@ -8347,7 +8206,6 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: buffer_load_dword v2, v2, s[16:19], 0 offen offset:1024
-; GFX7-NEXT: s_add_i32 s6, s20, 0x400
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v0
; GFX7-NEXT: s_mov_b64 s[4:5], 0
@@ -8356,7 +8214,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_mov_b32_e32 v2, s6
+; GFX7-NEXT: v_mov_b32_e32 v2, s20
; GFX7-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
@@ -8371,7 +8229,7 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_v2bf16__offset__amdgpu_no_fi
; GFX7-NEXT: v_alignbit_b32 v3, v3, v6, 16
; GFX7-NEXT: v_mov_b32_e32 v6, v4
; GFX7-NEXT: v_mov_b32_e32 v5, v3
-; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[5:6], v2, s[16:19], 0 offen offset:1024 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
@@ -8440,7 +8298,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-TRUE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8455,8 +8312,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-TRUE16-NEXT: ; %bb.2:
@@ -8468,30 +8324,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v8 :: v_dual_min_num_f32 v4, v4, v9
-; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v6, v6, v8 :: v_dual_min_num_f32 v5, v5, v9
+; GFX12-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8506,14 +8362,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8521,7 +8377,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-TRUE16-NEXT: s_wait_loadcnt 0x0
; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
@@ -8532,7 +8388,6 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX12-FAKE16-NEXT: s_mov_b32 s1, exec_lo
; GFX12-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8547,8 +8402,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], null offen offset:1024
-; GFX12-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX12-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], null offen offset:1024
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX12-FAKE16-NEXT: ; %bb.2:
@@ -8560,30 +8414,30 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX12-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX12-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v9 :: v_dual_min_num_f32 v4, v4, v8
-; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v6, v6, v9 :: v_dual_min_num_f32 v5, v5, v8
+; GFX12-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX12-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX12-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX12-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v5
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX12-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX12-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX12-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8598,14 +8452,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], null offen th:TH_ATOMIC_RETURN
+; GFX12-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], null offen offset:1024 th:TH_ATOMIC_RETURN
; GFX12-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX12-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX12-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_DEV
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8613,14 +8467,13 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB21_3
; GFX12-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX942-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX942: ; %bb.0:
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX942-NEXT: s_mov_b64 s[2:3], exec
; GFX942-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8632,40 +8485,39 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX942-NEXT: v_cmp_eq_u64_e64 s[0:1], s[6:7], v[2:3]
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
-; GFX942-NEXT: buffer_load_dword v7, v4, s[4:7], 0 offen offset:1024
-; GFX942-NEXT: ; implicit-def: $vgpr4
+; GFX942-NEXT: buffer_load_dword v9, v4, s[4:7], 0 offen offset:1024
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_1
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: s_mov_b64 exec, s[2:3]
; GFX942-NEXT: s_mov_b64 s[2:3], 0
-; GFX942-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX942-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX942-NEXT: s_movk_i32 s10, 0x7fff
-; GFX942-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX942-NEXT: s_mov_b32 s11, 0x7060302
; GFX942-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Loop Header: Depth=1
; GFX942-NEXT: ; Child Loop BB21_4 Depth 2
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s10
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX942-NEXT: v_min_f32_e32 v6, v6, v10
+; GFX942-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX942-NEXT: v_add3_u32 v7, v7, v6, s10
+; GFX942-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX942-NEXT: s_mov_b64 s[8:9], exec
; GFX942-NEXT: buffer_wbl2 sc1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX942-NEXT: v_min_f32_e32 v5, v5, v10
-; GFX942-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX942-NEXT: v_add3_u32 v6, v6, v5, s10
-; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX942-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX942-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX942-NEXT: v_min_f32_e32 v7, v7, v5
+; GFX942-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX942-NEXT: v_add3_u32 v8, v8, v7, s10
+; GFX942-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX942-NEXT: v_perm_b32 v6, v5, v4, s11
-; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[6:7]
+; GFX942-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX942-NEXT: v_perm_b32 v8, v7, v6, s11
+; GFX942-NEXT: v_mov_b64_e32 v[6:7], v[8:9]
; GFX942-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX942-NEXT: ; => This Inner Loop Header: Depth=2
; GFX942-NEXT: v_readfirstlane_b32 s4, v0
@@ -8678,27 +8530,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX942-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GFX942-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[4:7], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[4:7], 0 offen offset:1024 sc0
; GFX942-NEXT: s_xor_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB21_4
; GFX942-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX942-NEXT: s_mov_b64 exec, s[8:9]
; GFX942-NEXT: s_waitcnt vmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v7, v4
+; GFX942-NEXT: v_mov_b32_e32 v9, v6
; GFX942-NEXT: buffer_inv sc1
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB21_3
; GFX942-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX942-NEXT: s_or_b64 exec, exec, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v0, v4
+; GFX942-NEXT: v_mov_b32_e32 v0, v6
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-TRUE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8712,8 +8563,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-TRUE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-TRUE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-TRUE16-NEXT: ; %bb.2:
@@ -8726,28 +8576,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-TRUE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-TRUE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v8 :: v_dual_min_f32 v4, v4, v9
-; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_dual_min_f32 v6, v6, v8 :: v_dual_min_f32 v5, v5, v9
+; GFX11-TRUE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-TRUE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-TRUE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v6.l, v5.h
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-TRUE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-TRUE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8761,14 +8611,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-TRUE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-TRUE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-TRUE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-TRUE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-TRUE16-NEXT: buffer_gl1_inv
; GFX11-TRUE16-NEXT: buffer_gl0_inv
; GFX11-TRUE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8778,13 +8628,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-TRUE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-TRUE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-FAKE16-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8798,8 +8647,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
-; GFX11-FAKE16-NEXT: buffer_load_b32 v6, v4, s[4:7], 0 offen offset:1024
-; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr4
+; GFX11-FAKE16-NEXT: buffer_load_b32 v7, v4, s[4:7], 0 offen offset:1024
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_1
; GFX11-FAKE16-NEXT: ; %bb.2:
@@ -8812,28 +8660,28 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; =>This Loop Header: Depth=1
; GFX11-FAKE16-NEXT: ; Child Loop BB21_4 Depth 2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v7
; GFX11-FAKE16-NEXT: s_mov_b32 s2, exec_lo
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v9 :: v_dual_min_f32 v4, v4, v8
-; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_dual_min_f32 v6, v6, v9 :: v_dual_min_f32 v5, v5, v8
+; GFX11-FAKE16-NEXT: v_bfe_u32 v11, v6, 16, 1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX11-FAKE16-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX11-FAKE16-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-FAKE16-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v5, v6
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v7
; GFX11-FAKE16-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX11-FAKE16-NEXT: ; => This Inner Loop Header: Depth=2
; GFX11-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
@@ -8847,14 +8695,14 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_saveexec_b32 s0, s0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[4:5], v7, s[4:7], 0 offen glc
+; GFX11-FAKE16-NEXT: buffer_atomic_cmpswap_b32 v[5:6], v4, s[4:7], 0 offen offset:1024 glc
; GFX11-FAKE16-NEXT: s_xor_b32 exec_lo, exec_lo, s0
; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-FAKE16-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX11-FAKE16-NEXT: s_mov_b32 exec_lo, s2
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v6, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v7, v5
; GFX11-FAKE16-NEXT: buffer_gl1_inv
; GFX11-FAKE16-NEXT: buffer_gl0_inv
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
@@ -8864,13 +8712,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX11-FAKE16-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX11-FAKE16-NEXT: s_set_inst_prefetch_distance 0x2
; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v5
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_nc_u32_e32 v7, 0x400, v4
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
@@ -8882,8 +8729,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
-; GFX10-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX10-NEXT: ; implicit-def: $vgpr4
+; GFX10-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_1
@@ -8895,25 +8741,25 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB21_4 Depth 2
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
; GFX10-NEXT: s_mov_b32 s6, exec_lo
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX10-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX10-NEXT: v_bfe_u32 v10, v4, 16, 1
-; GFX10-NEXT: v_bfe_u32 v11, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v5
-; GFX10-NEXT: v_add3_u32 v10, v10, v4, 0x7fff
-; GFX10-NEXT: v_add3_u32 v11, v11, v5, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v10, v12, vcc_lo
+; GFX10-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX10-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX10-NEXT: v_bfe_u32 v10, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v11, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v13, vcc_lo
-; GFX10-NEXT: v_perm_b32 v5, v5, v4, 0x7060302
-; GFX10-NEXT: v_mov_b32_e32 v4, v5
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v10, v10, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v11, v6, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v12, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v11, v13, vcc_lo
+; GFX10-NEXT: v_perm_b32 v6, v6, v5, 0x7060302
; GFX10-NEXT: v_mov_b32_e32 v5, v6
+; GFX10-NEXT: v_mov_b32_e32 v6, v7
; GFX10-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX10-NEXT: ; => This Inner Loop Header: Depth=2
; GFX10-NEXT: v_readfirstlane_b32 s8, v0
@@ -8925,15 +8771,15 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_and_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_and_saveexec_b32 s4, s4
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX10-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_xor_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB21_4
; GFX10-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX10-NEXT: s_mov_b32 exec_lo, s6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v6
-; GFX10-NEXT: v_mov_b32_e32 v6, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX10-NEXT: v_mov_b32_e32 v7, v5
; GFX10-NEXT: buffer_gl1_inv
; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
@@ -8942,13 +8788,12 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX10-NEXT: s_cbranch_execnz .LBB21_3
; GFX10-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, v4
+; GFX10-NEXT: v_mov_b32_e32 v0, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX90A-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX90A: ; %bb.0:
; GFX90A-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT: v_add_u32_e32 v8, 0x400, v4
; GFX90A-NEXT: s_mov_b64 s[6:7], exec
; GFX90A-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -8960,38 +8805,37 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_nop 0
-; GFX90A-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX90A-NEXT: ; implicit-def: $vgpr4
+; GFX90A-NEXT: buffer_load_dword v9, v4, s[8:11], 0 offen offset:1024
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_1
; GFX90A-NEXT: ; %bb.2:
; GFX90A-NEXT: s_mov_b64 exec, s[6:7]
; GFX90A-NEXT: s_mov_b64 s[6:7], 0
-; GFX90A-NEXT: v_lshlrev_b32_e32 v9, 16, v5
+; GFX90A-NEXT: v_lshlrev_b32_e32 v10, 16, v5
; GFX90A-NEXT: s_movk_i32 s14, 0x7fff
-; GFX90A-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX90A-NEXT: s_mov_b32 s15, 0x7060302
; GFX90A-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB21_4 Depth 2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v7
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v7
-; GFX90A-NEXT: v_min_f32_e32 v5, v5, v10
-; GFX90A-NEXT: v_bfe_u32 v6, v5, 16, 1
-; GFX90A-NEXT: v_add3_u32 v6, v6, v5, s14
-; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v5
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cndmask_b32_e32 v5, v6, v11, vcc
-; GFX90A-NEXT: v_perm_b32 v6, v5, v4, s15
+; GFX90A-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX90A-NEXT: v_min_f32_e32 v6, v6, v10
+; GFX90A-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX90A-NEXT: v_add3_u32 v7, v7, v6, s14
+; GFX90A-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX90A-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
+; GFX90A-NEXT: v_and_b32_e32 v7, 0xffff0000, v9
+; GFX90A-NEXT: v_min_f32_e32 v7, v7, v5
+; GFX90A-NEXT: v_bfe_u32 v8, v7, 16, 1
+; GFX90A-NEXT: v_add3_u32 v8, v8, v7, s14
+; GFX90A-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX90A-NEXT: v_cndmask_b32_e32 v7, v8, v11, vcc
+; GFX90A-NEXT: v_perm_b32 v8, v7, v6, s15
; GFX90A-NEXT: s_mov_b64 s[12:13], exec
-; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[6:7], v[6:7] op_sel:[0,1]
+; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[8:9], v[8:9] op_sel:[0,1]
; GFX90A-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
; GFX90A-NEXT: v_readfirstlane_b32 s8, v0
@@ -9003,27 +8847,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX90A-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX90A-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: buffer_atomic_cmpswap v[4:5], v8, s[8:11], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[6:7], v4, s[8:11], 0 offen offset:1024 glc
; GFX90A-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_4
; GFX90A-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX90A-NEXT: s_mov_b64 exec, s[12:13]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v7
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v6, v9
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v7, v4
+; GFX90A-NEXT: v_mov_b32_e32 v9, v6
; GFX90A-NEXT: buffer_wbinvl1
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB21_3
; GFX90A-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX90A-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v0, v4
+; GFX90A-NEXT: v_mov_b32_e32 v0, v6
; GFX90A-NEXT: s_setpc_b64 s[30:31]
;
; GFX908-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX908: ; %bb.0:
; GFX908-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT: v_add_u32_e32 v7, 0x400, v4
; GFX908-NEXT: s_mov_b64 s[6:7], exec
; GFX908-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9035,8 +8878,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_nop 0
-; GFX908-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX908-NEXT: ; implicit-def: $vgpr4
+; GFX908-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_1
; GFX908-NEXT: ; %bb.2:
@@ -9050,24 +8892,24 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB21_4 Depth 2
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s14
-; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX908-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX908-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX908-NEXT: v_add3_u32 v10, v10, v5, s14
-; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX908-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX908-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX908-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX908-NEXT: v_add3_u32 v6, v6, v5, s14
+; GFX908-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX908-NEXT: v_perm_b32 v5, v5, v4, s15
-; GFX908-NEXT: v_mov_b32_e32 v4, v5
-; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX908-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX908-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX908-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX908-NEXT: v_add3_u32 v10, v10, v6, s14
+; GFX908-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX908-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX908-NEXT: v_perm_b32 v6, v6, v5, s15
; GFX908-NEXT: v_mov_b32_e32 v5, v6
+; GFX908-NEXT: s_mov_b64 s[12:13], exec
+; GFX908-NEXT: v_mov_b32_e32 v6, v7
; GFX908-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
; GFX908-NEXT: v_readfirstlane_b32 s8, v0
@@ -9079,27 +8921,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX908-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX908-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX908-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB21_4
; GFX908-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX908-NEXT: s_mov_b64 exec, s[12:13]
; GFX908-NEXT: s_waitcnt vmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v6, v4
+; GFX908-NEXT: v_mov_b32_e32 v7, v5
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB21_3
; GFX908-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX908-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v0, v4
+; GFX908-NEXT: v_mov_b32_e32 v0, v5
; GFX908-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x400, v4
; GFX8-NEXT: s_mov_b64 s[6:7], exec
; GFX8-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9111,8 +8952,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_nop 0
-; GFX8-NEXT: buffer_load_dword v6, v4, s[8:11], 0 offen offset:1024
-; GFX8-NEXT: ; implicit-def: $vgpr4
+; GFX8-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_1
; GFX8-NEXT: ; %bb.2:
@@ -9124,27 +8964,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v8
-; GFX8-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v10, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX8-NEXT: v_min_f32_e32 v5, v5, v9
-; GFX8-NEXT: v_bfe_u32 v10, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v5
-; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
-; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v7
+; GFX8-NEXT: v_min_f32_e32 v5, v5, v8
+; GFX8-NEXT: v_bfe_u32 v6, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v4, 16
-; GFX8-NEXT: v_mov_b32_e32 v4, v5
-; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v10, vcc
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v7
+; GFX8-NEXT: v_min_f32_e32 v6, v6, v9
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, 0x7fff, v10
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v5, 16
; GFX8-NEXT: v_mov_b32_e32 v5, v6
+; GFX8-NEXT: s_mov_b64 s[12:13], exec
+; GFX8-NEXT: v_mov_b32_e32 v6, v7
; GFX8-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX8-NEXT: ; => This Inner Loop Header: Depth=2
; GFX8-NEXT: v_readfirstlane_b32 s8, v0
@@ -9156,27 +8996,26 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX8-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX8-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: buffer_atomic_cmpswap v[4:5], v7, s[8:11], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[5:6], v4, s[8:11], 0 offen offset:1024 glc
; GFX8-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB21_4
; GFX8-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX8-NEXT: s_mov_b64 exec, s[12:13]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v6
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v7, v5
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB21_3
; GFX8-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX8-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v0, v4
+; GFX8-NEXT: v_mov_b32_e32 v0, v5
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v8, vcc, 0x400, v4
; GFX7-NEXT: s_mov_b64 s[6:7], exec
; GFX7-NEXT: .LBB21_1: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9187,8 +9026,7 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[10:11], v[2:3]
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
-; GFX7-NEXT: buffer_load_dword v7, v4, s[8:11], 0 offen offset:1024
-; GFX7-NEXT: ; implicit-def: $vgpr4
+; GFX7-NEXT: buffer_load_dword v8, v4, s[8:11], 0 offen offset:1024
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_1
; GFX7-NEXT: ; %bb.2:
@@ -9196,27 +9034,27 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: s_mov_b64 s[6:7], 0
-; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v6
-; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v5
; GFX7-NEXT: .LBB21_3: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Loop Header: Depth=1
; GFX7-NEXT: ; Child Loop BB21_4 Depth 2
-; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v4
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v5
; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v7
-; GFX7-NEXT: v_min_f32_e32 v4, v4, v9
-; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v8
+; GFX7-NEXT: v_min_f32_e32 v5, v5, v10
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v7
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_min_f32_e32 v7, v7, v10
-; GFX7-NEXT: v_alignbit_b32 v5, v5, v6, 16
-; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
-; GFX7-NEXT: v_mov_b32_e32 v7, v5
+; GFX7-NEXT: v_min_f32_e32 v8, v8, v11
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v7, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v5, v8, 16
+; GFX7-NEXT: v_mov_b32_e32 v9, v6
; GFX7-NEXT: s_mov_b64 s[12:13], exec
-; GFX7-NEXT: v_mov_b32_e32 v6, v4
+; GFX7-NEXT: v_mov_b32_e32 v8, v5
; GFX7-NEXT: .LBB21_4: ; Parent Loop BB21_3 Depth=1
; GFX7-NEXT: ; => This Inner Loop Header: Depth=2
; GFX7-NEXT: v_readfirstlane_b32 s8, v0
@@ -9228,23 +9066,23 @@ define <2 x bfloat> @buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterf
; GFX7-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7-NEXT: s_and_saveexec_b64 s[4:5], s[4:5]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: buffer_atomic_cmpswap v[6:7], v8, s[8:11], 0 offen glc
+; GFX7-NEXT: buffer_atomic_cmpswap v[8:9], v4, s[8:11], 0 offen offset:1024 glc
; GFX7-NEXT: s_xor_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB21_4
; GFX7-NEXT: ; %bb.5: ; in Loop: Header=BB21_3 Depth=1
; GFX7-NEXT: s_mov_b64 exec, s[12:13]
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v6, v5
-; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v6
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v8, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v8
; GFX7-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX7-NEXT: s_cbranch_execnz .LBB21_3
; GFX7-NEXT: ; %bb.6: ; %atomicrmw.end
; GFX7-NEXT: s_or_b64 exec, exec, s[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v7
-; GFX7-NEXT: v_mov_b32_e32 v1, v4
+; GFX7-NEXT: v_mov_b32_e32 v0, v8
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX6-LABEL: buffer_fat_ptr_agent_atomic_fmin_ret_v2bf16__offset__waterfall__amdgpu_no_fine_grained_memory:
@@ -9353,10 +9191,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_mov_b32_e32 v1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, s16
; GFX942-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen offset:1024
-; GFX942-NEXT: s_add_i32 s6, s16, 0x400
; GFX942-NEXT: s_mov_b64 s[4:5], 0
; GFX942-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX942-NEXT: v_mov_b32_e32 v3, s6
+; GFX942-NEXT: v_mov_b32_e32 v3, s16
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -9365,7 +9202,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX942-NEXT: v_min_f32_e32 v4, v0, v2
; GFX942-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
; GFX942-NEXT: buffer_wbl2 sc0 sc1
-; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen sc0
+; GFX942-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[0:3], 0 offen offset:1024 sc0
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: buffer_inv sc0 sc1
; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9404,10 +9241,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_mov_b32_e32 v1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, s20
; GFX90A-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX90A-NEXT: s_add_i32 s6, s20, 0x400
; GFX90A-NEXT: s_mov_b64 s[4:5], 0
; GFX90A-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX90A-NEXT: v_mov_b32_e32 v3, s6
+; GFX90A-NEXT: v_mov_b32_e32 v3, s20
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -9416,7 +9252,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX90A-NEXT: v_min_f32_e32 v4, v0, v2
; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: buffer_wbl2
-; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX90A-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: buffer_invl2
; GFX90A-NEXT: buffer_wbinvl1
@@ -9434,10 +9270,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_mov_b32_e32 v1, v0
; GFX908-NEXT: v_mov_b32_e32 v0, s20
; GFX908-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX908-NEXT: s_add_i32 s6, s20, 0x400
; GFX908-NEXT: s_mov_b64 s[4:5], 0
; GFX908-NEXT: v_max_f32_e32 v2, v1, v1
-; GFX908-NEXT: v_mov_b32_e32 v3, s6
+; GFX908-NEXT: v_mov_b32_e32 v3, s20
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt vmcnt(0)
@@ -9446,7 +9281,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX908-NEXT: v_min_f32_e32 v4, v0, v2
; GFX908-NEXT: v_mov_b32_e32 v0, v4
; GFX908-NEXT: v_mov_b32_e32 v1, v5
-; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX908-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX908-NEXT: s_waitcnt vmcnt(0)
; GFX908-NEXT: buffer_wbinvl1
; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
@@ -9463,10 +9298,9 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_mov_b32_e32 v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s20
; GFX8-NEXT: buffer_load_dword v0, v0, s[16:19], 0 offen offset:1024
-; GFX8-NEXT: s_add_i32 s6, s20, 0x400
; GFX8-NEXT: s_mov_b64 s[4:5], 0
; GFX8-NEXT: v_mul_f32_e32 v2, 1.0, v1
-; GFX8-NEXT: v_mov_b32_e32 v3, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s20
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -9475,7 +9309,7 @@ define float @buffer_fat_ptr_system_atomic_fmin_ret_f32__offset__amdgpu_no_fine_
; GFX8-NEXT: v_min_f32_e32 v4, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
-; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen glc
+; GFX8-NEXT: buffer_atomic_cmpswap v[0:1], v3, s[16:19], 0 offen offset:1024 glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: buffer_wbinvl1
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index 3c991cf..afd0f01 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -782,69 +782,90 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; SDAG-GFX942-LABEL: memcpy_known_medium:
; SDAG-GFX942: ; %bb.0:
; SDAG-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; SDAG-GFX942-NEXT: s_load_dword s13, s[4:5], 0x34
+; SDAG-GFX942-NEXT: s_load_dword s17, s[4:5], 0x34
; SDAG-GFX942-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x44
-; SDAG-GFX942-NEXT: s_load_dword s14, s[4:5], 0x54
-; SDAG-GFX942-NEXT: s_mov_b32 s12, 0
-; SDAG-GFX942-NEXT: s_mov_b32 s5, s12
-; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, 0
+; SDAG-GFX942-NEXT: s_load_dword s12, s[4:5], 0x54
+; SDAG-GFX942-NEXT: s_mov_b32 s16, 0
+; SDAG-GFX942-NEXT: s_mov_b32 s5, s16
; SDAG-GFX942-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-GFX942-NEXT: s_mov_b32 s4, s3
-; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s2
+; SDAG-GFX942-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s2
; SDAG-GFX942-NEXT: s_mov_b32 s2, s1
-; SDAG-GFX942-NEXT: s_mov_b32 s3, s12
-; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s14
+; SDAG-GFX942-NEXT: s_mov_b32 s3, s16
+; SDAG-GFX942-NEXT: s_or_b64 s[4:5], s[2:3], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s12
; SDAG-GFX942-NEXT: s_mov_b32 s2, s11
-; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[12:13]
-; SDAG-GFX942-NEXT: s_mov_b32 s13, s10
+; SDAG-GFX942-NEXT: s_or_b64 s[14:15], s[2:3], s[16:17]
+; SDAG-GFX942-NEXT: s_mov_b32 s17, s10
; SDAG-GFX942-NEXT: s_mov_b32 s2, s9
-; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13]
+; SDAG-GFX942-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17]
; SDAG-GFX942-NEXT: .LBB1_1: ; %load-store-loop
; SDAG-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
-; SDAG-GFX942-NEXT: v_add_u32_e32 v1, s0, v0
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[4:7], 0 offen
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[4:7], 0 offen offset:16
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[4:7], 0 offen offset:32
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[4:7], 0 offen offset:48
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[4:7], 0 offen offset:64
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[4:7], 0 offen offset:80
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[4:7], 0 offen offset:96
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[4:7], 0 offen offset:112
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[4:7], 0 offen offset:128
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[4:7], 0 offen offset:144
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[4:7], 0 offen offset:160
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[4:7], 0 offen offset:176
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[4:7], 0 offen offset:192
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[4:7], 0 offen offset:208
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[4:7], 0 offen offset:224
-; SDAG-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[4:7], 0 offen offset:240
-; SDAG-GFX942-NEXT: v_add_u32_e32 v62, s8, v0
-; SDAG-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0
-; SDAG-GFX942-NEXT: s_and_b64 vcc, exec, vcc
-; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse
-; SDAG-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[12:15], 0 offen offset:16
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[12:15], 0 offen offset:32
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[12:15], 0 offen offset:48
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[12:15], 0 offen offset:64
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[12:15], 0 offen offset:80
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[12:15], 0 offen offset:96
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[12:15], 0 offen offset:112
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[12:15], 0 offen offset:128
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[12:15], 0 offen offset:144
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[12:15], 0 offen offset:160
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[12:15], 0 offen offset:176
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[12:15], 0 offen offset:192
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[12:15], 0 offen offset:208
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[12:15], 0 offen offset:224
-; SDAG-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload
+; SDAG-GFX942-NEXT: s_add_i32 s1, s0, s16
+; SDAG-GFX942-NEXT: v_mov_b32_e32 v60, s1
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[8:11], v60, s[4:7], 0 offen
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[4:7], v60, s[4:7], 0 offen offset:16
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:32
+; SDAG-GFX942-NEXT: s_add_i32 s2, s8, s16
+; SDAG-GFX942-NEXT: v_mov_b32_e32 v0, s2
+; SDAG-GFX942-NEXT: s_addk_i32 s16, 0x100
+; SDAG-GFX942-NEXT: s_cmpk_lt_u32 s16, 0x100
; SDAG-GFX942-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[12:15], 0 offen offset:240
-; SDAG-GFX942-NEXT: s_cbranch_vccnz .LBB1_1
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a0, v15 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a1, v14 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a2, v13 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_write_b32 a3, v12 ; Reload Reuse
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[12:15], v60, s[4:7], 0 offen offset:48
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[16:19], v60, s[4:7], 0 offen offset:64
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[20:23], v60, s[4:7], 0 offen offset:80
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[24:27], v60, s[4:7], 0 offen offset:96
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[28:31], v60, s[4:7], 0 offen offset:112
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[32:35], v60, s[4:7], 0 offen offset:128
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[36:39], v60, s[4:7], 0 offen offset:144
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[40:43], v60, s[4:7], 0 offen offset:160
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[44:47], v60, s[4:7], 0 offen offset:176
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[48:51], v60, s[4:7], 0 offen offset:192
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[52:55], v60, s[4:7], 0 offen offset:208
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[56:59], v60, s[4:7], 0 offen offset:224
+; SDAG-GFX942-NEXT: s_nop 0
+; SDAG-GFX942-NEXT: buffer_load_dwordx4 v[60:63], v60, s[4:7], 0 offen offset:240
+; SDAG-GFX942-NEXT: s_nop 0
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[8:11], v0, s[12:15], 0 offen
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[4:7], v0, s[12:15], 0 offen offset:16
+; SDAG-GFX942-NEXT: s_nop 1
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v5, a0 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v4, a1 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v3, a2 ; Reload Reuse
+; SDAG-GFX942-NEXT: v_accvgpr_read_b32 v2, a3 ; Reload Reuse
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v0, s[12:15], 0 offen offset:32
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[12:15], v0, s[12:15], 0 offen offset:48
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[16:19], v0, s[12:15], 0 offen offset:64
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[20:23], v0, s[12:15], 0 offen offset:80
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[24:27], v0, s[12:15], 0 offen offset:96
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[28:31], v0, s[12:15], 0 offen offset:112
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[32:35], v0, s[12:15], 0 offen offset:128
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[36:39], v0, s[12:15], 0 offen offset:144
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[40:43], v0, s[12:15], 0 offen offset:160
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[44:47], v0, s[12:15], 0 offen offset:176
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[48:51], v0, s[12:15], 0 offen offset:192
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[52:55], v0, s[12:15], 0 offen offset:208
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[56:59], v0, s[12:15], 0 offen offset:224
+; SDAG-GFX942-NEXT: s_waitcnt vmcnt(15)
+; SDAG-GFX942-NEXT: buffer_store_dwordx4 v[60:63], v0, s[12:15], 0 offen offset:240
+; SDAG-GFX942-NEXT: s_cbranch_scc1 .LBB1_1
; SDAG-GFX942-NEXT: ; %bb.2: ; %memcpy-split
; SDAG-GFX942-NEXT: s_endpgm
;
@@ -852,84 +873,87 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; SDAG-GFX1100: ; %bb.0:
; SDAG-GFX1100-NEXT: s_clause 0x3
; SDAG-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; SDAG-GFX1100-NEXT: s_load_b32 s13, s[4:5], 0x34
+; SDAG-GFX1100-NEXT: s_load_b32 s17, s[4:5], 0x34
; SDAG-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44
; SDAG-GFX1100-NEXT: s_load_b32 s18, s[4:5], 0x54
-; SDAG-GFX1100-NEXT: s_mov_b32 s12, 0
-; SDAG-GFX1100-NEXT: v_mov_b32_e32 v0, 0
-; SDAG-GFX1100-NEXT: s_mov_b32 s5, s12
-; SDAG-GFX1100-NEXT: s_mov_b32 s15, s12
-; SDAG-GFX1100-NEXT: s_mov_b32 s17, s12
+; SDAG-GFX1100-NEXT: s_mov_b32 s16, 0
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; SDAG-GFX1100-NEXT: s_mov_b32 s5, s16
+; SDAG-GFX1100-NEXT: s_mov_b32 s13, s16
+; SDAG-GFX1100-NEXT: s_mov_b32 s15, s16
; SDAG-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
; SDAG-GFX1100-NEXT: s_mov_b32 s4, s3
-; SDAG-GFX1100-NEXT: s_mov_b32 s14, s1
-; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s2
-; SDAG-GFX1100-NEXT: s_mov_b32 s16, s11
-; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[14:15], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s18
+; SDAG-GFX1100-NEXT: s_mov_b32 s12, s1
+; SDAG-GFX1100-NEXT: s_or_b64 s[6:7], s[4:5], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s2
+; SDAG-GFX1100-NEXT: s_mov_b32 s14, s11
+; SDAG-GFX1100-NEXT: s_or_b64 s[4:5], s[12:13], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s18
; SDAG-GFX1100-NEXT: s_mov_b32 s2, s9
-; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[12:13]
-; SDAG-GFX1100-NEXT: s_mov_b32 s13, s10
-; SDAG-GFX1100-NEXT: s_mov_b32 s3, s12
+; SDAG-GFX1100-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; SDAG-GFX1100-NEXT: s_mov_b32 s17, s10
+; SDAG-GFX1100-NEXT: s_mov_b32 s3, s16
; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[12:13]
+; SDAG-GFX1100-NEXT: s_or_b64 s[12:13], s[2:3], s[16:17]
; SDAG-GFX1100-NEXT: .LBB1_1: ; %load-store-loop
; SDAG-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
-; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
-; SDAG-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0
-; SDAG-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0
-; SDAG-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; SDAG-GFX1100-NEXT: s_add_i32 s1, s0, s16
+; SDAG-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; SDAG-GFX1100-NEXT: v_mov_b32_e32 v60, s1
+; SDAG-GFX1100-NEXT: s_add_i32 s1, s8, s16
+; SDAG-GFX1100-NEXT: s_addk_i32 s16, 0x100
+; SDAG-GFX1100-NEXT: v_mov_b32_e32 v64, s1
+; SDAG-GFX1100-NEXT: s_cmpk_lt_u32 s16, 0x100
; SDAG-GFX1100-NEXT: s_clause 0xf
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[9:12], v61, s[4:7], 0 offen offset:32
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[13:16], v61, s[4:7], 0 offen offset:48
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[17:20], v61, s[4:7], 0 offen offset:64
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[21:24], v61, s[4:7], 0 offen offset:80
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[25:28], v61, s[4:7], 0 offen offset:96
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[29:32], v61, s[4:7], 0 offen offset:112
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[33:36], v61, s[4:7], 0 offen offset:128
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[37:40], v61, s[4:7], 0 offen offset:144
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[41:44], v61, s[4:7], 0 offen offset:160
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[45:48], v61, s[4:7], 0 offen offset:176
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[49:52], v61, s[4:7], 0 offen offset:192
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224
-; SDAG-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[0:3], v60, s[4:7], 0 offen
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[4:7], v60, s[4:7], 0 offen offset:16
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[8:11], v60, s[4:7], 0 offen offset:32
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[12:15], v60, s[4:7], 0 offen offset:48
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[16:19], v60, s[4:7], 0 offen offset:64
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[20:23], v60, s[4:7], 0 offen offset:80
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[24:27], v60, s[4:7], 0 offen offset:96
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[28:31], v60, s[4:7], 0 offen offset:112
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[32:35], v60, s[4:7], 0 offen offset:128
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[36:39], v60, s[4:7], 0 offen offset:144
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[40:43], v60, s[4:7], 0 offen offset:160
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[44:47], v60, s[4:7], 0 offen offset:176
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[48:51], v60, s[4:7], 0 offen offset:192
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[52:55], v60, s[4:7], 0 offen offset:208
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[56:59], v60, s[4:7], 0 offen offset:224
+; SDAG-GFX1100-NEXT: buffer_load_b128 v[60:63], v60, s[4:7], 0 offen offset:240
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(15)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[0:3], v64, s[12:15], 0 offen
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(14)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[5:8], v65, s[12:15], 0 offen offset:16
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[4:7], v64, s[12:15], 0 offen offset:16
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(13)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[9:12], v65, s[12:15], 0 offen offset:32
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[8:11], v64, s[12:15], 0 offen offset:32
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(12)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[13:16], v65, s[12:15], 0 offen offset:48
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[12:15], v64, s[12:15], 0 offen offset:48
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(11)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[17:20], v65, s[12:15], 0 offen offset:64
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[16:19], v64, s[12:15], 0 offen offset:64
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(10)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[21:24], v65, s[12:15], 0 offen offset:80
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[20:23], v64, s[12:15], 0 offen offset:80
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(9)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[25:28], v65, s[12:15], 0 offen offset:96
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[24:27], v64, s[12:15], 0 offen offset:96
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(8)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[29:32], v65, s[12:15], 0 offen offset:112
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[28:31], v64, s[12:15], 0 offen offset:112
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(7)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[33:36], v65, s[12:15], 0 offen offset:128
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[32:35], v64, s[12:15], 0 offen offset:128
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(6)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[37:40], v65, s[12:15], 0 offen offset:144
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[36:39], v64, s[12:15], 0 offen offset:144
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(5)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[41:44], v65, s[12:15], 0 offen offset:160
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[40:43], v64, s[12:15], 0 offen offset:160
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(4)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[45:48], v65, s[12:15], 0 offen offset:176
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[44:47], v64, s[12:15], 0 offen offset:176
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(3)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[49:52], v65, s[12:15], 0 offen offset:192
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[48:51], v64, s[12:15], 0 offen offset:192
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(2)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[53:56], v65, s[12:15], 0 offen offset:208
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[52:55], v64, s[12:15], 0 offen offset:208
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(1)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[56:59], v64, s[12:15], 0 offen offset:224
; SDAG-GFX1100-NEXT: s_waitcnt vmcnt(0)
-; SDAG-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240
-; SDAG-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1
+; SDAG-GFX1100-NEXT: buffer_store_b128 v[60:63], v64, s[12:15], 0 offen offset:240
+; SDAG-GFX1100-NEXT: s_cbranch_scc1 .LBB1_1
; SDAG-GFX1100-NEXT: ; %bb.2: ; %memcpy-split
; SDAG-GFX1100-NEXT: s_endpgm
;
@@ -957,52 +981,50 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX942-NEXT: s_mov_b32 s2, s7
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3]
-; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s16
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16
; GISEL-GFX942-NEXT: .LBB1_1: ; %load-store-loop
; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
-; GISEL-GFX942-NEXT: v_add_u32_e32 v1, s0, v0
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v1, s[8:11], 0 offen
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v1, s[8:11], 0 offen offset:16
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v1, s[8:11], 0 offen offset:32
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v1, s[8:11], 0 offen offset:48
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v1, s[8:11], 0 offen offset:64
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v1, s[8:11], 0 offen offset:80
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v1, s[8:11], 0 offen offset:96
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v1, s[8:11], 0 offen offset:112
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v1, s[8:11], 0 offen offset:128
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v1, s[8:11], 0 offen offset:144
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v1, s[8:11], 0 offen offset:160
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v1, s[8:11], 0 offen offset:176
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v1, s[8:11], 0 offen offset:192
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v1, s[8:11], 0 offen offset:208
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v1, s[8:11], 0 offen offset:224
-; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v1, s[8:11], 0 offen offset:240
-; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s12, v0
-; GISEL-GFX942-NEXT: v_add_co_u32_e32 v0, vcc, 0x100, v0
-; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], vcc, -1
-; GISEL-GFX942-NEXT: s_xor_b64 s[2:3], s[2:3], -1
-; GISEL-GFX942-NEXT: s_and_b64 vcc, s[2:3], exec
+; GISEL-GFX942-NEXT: v_add_u32_e32 v62, s0, v1
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[2:5], v62, s[8:11], 0 offen
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[6:9], v62, s[8:11], 0 offen offset:16
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[10:13], v62, s[8:11], 0 offen offset:32
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[14:17], v62, s[8:11], 0 offen offset:48
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[18:21], v62, s[8:11], 0 offen offset:64
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[22:25], v62, s[8:11], 0 offen offset:80
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[26:29], v62, s[8:11], 0 offen offset:96
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[30:33], v62, s[8:11], 0 offen offset:112
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[34:37], v62, s[8:11], 0 offen offset:128
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[38:41], v62, s[8:11], 0 offen offset:144
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[42:45], v62, s[8:11], 0 offen offset:160
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[46:49], v62, s[8:11], 0 offen offset:176
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[50:53], v62, s[8:11], 0 offen offset:192
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[54:57], v62, s[8:11], 0 offen offset:208
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[58:61], v62, s[8:11], 0 offen offset:224
+; GISEL-GFX942-NEXT: buffer_load_dwordx4 a[0:3], v62, s[8:11], 0 offen offset:240
+; GISEL-GFX942-NEXT: v_add_u32_e32 v63, s12, v1
+; GISEL-GFX942-NEXT: v_add_u32_e32 v1, 0x100, v1
+; GISEL-GFX942-NEXT: v_cmp_lt_u32_e32 vcc, v1, v0
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: v_accvgpr_read_b32 v63, a3 ; Reload Reuse
-; GISEL-GFX942-NEXT: scratch_store_dwordx3 off, a[0:2], off ; 12-byte Folded Spill
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v62, s[4:7], 0 offen offset:16
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v62, s[4:7], 0 offen offset:32
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v62, s[4:7], 0 offen offset:48
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v62, s[4:7], 0 offen offset:64
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v62, s[4:7], 0 offen offset:80
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v62, s[4:7], 0 offen offset:96
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v62, s[4:7], 0 offen offset:112
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v62, s[4:7], 0 offen offset:128
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v62, s[4:7], 0 offen offset:144
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v62, s[4:7], 0 offen offset:160
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v62, s[4:7], 0 offen offset:176
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v62, s[4:7], 0 offen offset:192
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v62, s[4:7], 0 offen offset:208
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v62, s[4:7], 0 offen offset:224
-; GISEL-GFX942-NEXT: scratch_load_dwordx3 v[2:4], off, off ; 12-byte Folded Reload
+; GISEL-GFX942-NEXT: scratch_store_dwordx4 off, a[0:3], off ; 16-byte Folded Spill
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[6:9], v63, s[4:7], 0 offen offset:16
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[10:13], v63, s[4:7], 0 offen offset:32
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[14:17], v63, s[4:7], 0 offen offset:48
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[18:21], v63, s[4:7], 0 offen offset:64
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[22:25], v63, s[4:7], 0 offen offset:80
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[26:29], v63, s[4:7], 0 offen offset:96
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[30:33], v63, s[4:7], 0 offen offset:112
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[34:37], v63, s[4:7], 0 offen offset:128
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[38:41], v63, s[4:7], 0 offen offset:144
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[42:45], v63, s[4:7], 0 offen offset:160
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[46:49], v63, s[4:7], 0 offen offset:176
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[50:53], v63, s[4:7], 0 offen offset:192
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[54:57], v63, s[4:7], 0 offen offset:208
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[58:61], v63, s[4:7], 0 offen offset:224
+; GISEL-GFX942-NEXT: scratch_load_dwordx4 v[2:5], off, off ; 16-byte Folded Reload
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
-; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v62, s[4:7], 0 offen offset:240
+; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[2:5], v63, s[4:7], 0 offen offset:240
; GISEL-GFX942-NEXT: s_cbranch_vccnz .LBB1_1
; GISEL-GFX942-NEXT: ; %bb.2: ; %memcpy-split
; GISEL-GFX942-NEXT: s_endpgm
@@ -1037,8 +1059,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v65, s8, v0
-; GISEL-GFX1100-NEXT: v_add_co_u32 v0, s1, 0x100, v0
-; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1
+; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v0, 0x100, v0
; GISEL-GFX1100-NEXT: s_clause 0xf
; GISEL-GFX1100-NEXT: buffer_load_b128 v[1:4], v61, s[4:7], 0 offen
; GISEL-GFX1100-NEXT: buffer_load_b128 v[5:8], v61, s[4:7], 0 offen offset:16
@@ -1056,7 +1077,6 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: buffer_load_b128 v[53:56], v61, s[4:7], 0 offen offset:208
; GISEL-GFX1100-NEXT: buffer_load_b128 v[57:60], v61, s[4:7], 0 offen offset:224
; GISEL-GFX1100-NEXT: buffer_load_b128 v[61:64], v61, s[4:7], 0 offen offset:240
-; GISEL-GFX1100-NEXT: s_xor_b32 s1, s1, -1
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(15)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[1:4], v65, s[12:15], 0 offen
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(14)
@@ -1089,7 +1109,7 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: buffer_store_b128 v[57:60], v65, s[12:15], 0 offen offset:224
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[61:64], v65, s[12:15], 0 offen offset:240
-; GISEL-GFX1100-NEXT: s_and_b32 vcc_lo, exec_lo, s1
+; GISEL-GFX1100-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x100, v0
; GISEL-GFX1100-NEXT: s_cbranch_vccnz .LBB1_1
; GISEL-GFX1100-NEXT: ; %bb.2: ; %memcpy-split
; GISEL-GFX1100-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
index 5134159..0fc54ae 100644
--- a/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_v2i128.ll
@@ -619,43 +619,43 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: s_mov_b64 s[8:9], 0
; GISEL-NEXT: v_ashrrev_i32_e32 v18, 31, v7
; GISEL-NEXT: v_ashrrev_i32_e32 v19, 31, v15
-; GISEL-NEXT: v_mov_b32_e32 v10, 0x7f
-; GISEL-NEXT: v_mov_b32_e32 v11, 0
+; GISEL-NEXT: v_mov_b32_e32 v16, 0x7f
+; GISEL-NEXT: v_mov_b32_e32 v17, 0
; GISEL-NEXT: v_xor_b32_e32 v0, v18, v4
; GISEL-NEXT: v_xor_b32_e32 v1, v18, v5
; GISEL-NEXT: v_xor_b32_e32 v2, v18, v6
; GISEL-NEXT: v_xor_b32_e32 v3, v18, v7
; GISEL-NEXT: v_xor_b32_e32 v4, v19, v12
; GISEL-NEXT: v_xor_b32_e32 v5, v19, v13
-; GISEL-NEXT: v_xor_b32_e32 v14, v19, v14
-; GISEL-NEXT: v_xor_b32_e32 v15, v19, v15
+; GISEL-NEXT: v_xor_b32_e32 v12, v19, v14
+; GISEL-NEXT: v_xor_b32_e32 v13, v19, v15
; GISEL-NEXT: v_sub_i32_e32 v6, vcc, v0, v18
; GISEL-NEXT: v_subb_u32_e32 v7, vcc, v1, v18, vcc
; GISEL-NEXT: v_sub_i32_e64 v20, s[4:5], v4, v19
; GISEL-NEXT: v_subb_u32_e64 v21, s[4:5], v5, v19, s[4:5]
-; GISEL-NEXT: v_subb_u32_e32 v12, vcc, v2, v18, vcc
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v18, vcc
-; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v14, v19, s[4:5]
-; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v15, v19, vcc
-; GISEL-NEXT: v_ffbh_u32_e32 v14, v21
-; GISEL-NEXT: v_ffbh_u32_e32 v15, v20
-; GISEL-NEXT: v_ffbh_u32_e32 v16, v7
-; GISEL-NEXT: v_ffbh_u32_e32 v17, v6
+; GISEL-NEXT: v_subb_u32_e32 v10, vcc, v2, v18, vcc
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v18, vcc
+; GISEL-NEXT: v_subb_u32_e64 v4, vcc, v12, v19, s[4:5]
+; GISEL-NEXT: v_subb_u32_e32 v5, vcc, v13, v19, vcc
+; GISEL-NEXT: v_ffbh_u32_e32 v12, v21
+; GISEL-NEXT: v_ffbh_u32_e32 v13, v20
+; GISEL-NEXT: v_ffbh_u32_e32 v14, v7
+; GISEL-NEXT: v_ffbh_u32_e32 v15, v6
; GISEL-NEXT: v_or_b32_e32 v0, v20, v4
; GISEL-NEXT: v_or_b32_e32 v1, v21, v5
-; GISEL-NEXT: v_or_b32_e32 v2, v6, v12
-; GISEL-NEXT: v_or_b32_e32 v3, v7, v13
-; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_or_b32_e32 v2, v6, v10
+; GISEL-NEXT: v_or_b32_e32 v3, v7, v11
+; GISEL-NEXT: v_add_i32_e32 v13, vcc, 32, v13
; GISEL-NEXT: v_ffbh_u32_e32 v26, v5
; GISEL-NEXT: v_ffbh_u32_e32 v27, v4
-; GISEL-NEXT: v_add_i32_e32 v17, vcc, 32, v17
-; GISEL-NEXT: v_ffbh_u32_e32 v28, v13
-; GISEL-NEXT: v_ffbh_u32_e32 v29, v12
+; GISEL-NEXT: v_add_i32_e32 v15, vcc, 32, v15
+; GISEL-NEXT: v_ffbh_u32_e32 v28, v11
+; GISEL-NEXT: v_ffbh_u32_e32 v29, v10
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
-; GISEL-NEXT: v_min_u32_e32 v0, v14, v15
+; GISEL-NEXT: v_min_u32_e32 v0, v12, v13
; GISEL-NEXT: v_add_i32_e64 v1, s[6:7], 32, v27
-; GISEL-NEXT: v_min_u32_e32 v2, v16, v17
+; GISEL-NEXT: v_min_u32_e32 v2, v14, v15
; GISEL-NEXT: v_add_i32_e64 v3, s[6:7], 32, v29
; GISEL-NEXT: v_add_i32_e64 v0, s[6:7], 64, v0
; GISEL-NEXT: v_min_u32_e32 v1, v26, v1
@@ -665,32 +665,32 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, 1, s[4:5]
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
; GISEL-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GISEL-NEXT: v_sub_i32_e32 v2, vcc, v0, v1
; GISEL-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, vcc
; GISEL-NEXT: v_subb_u32_e64 v0, s[4:5], 0, 0, s[4:5]
; GISEL-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[10:11]
+; GISEL-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[16:17]
; GISEL-NEXT: v_cndmask_b32_e64 v15, 0, 1, vcc
-; GISEL-NEXT: v_xor_b32_e32 v10, 0x7f, v2
+; GISEL-NEXT: v_xor_b32_e32 v12, 0x7f, v2
; GISEL-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v0
-; GISEL-NEXT: v_or_b32_e32 v11, v3, v1
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v0
+; GISEL-NEXT: v_or_b32_e32 v13, v3, v1
; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GISEL-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
-; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GISEL-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; GISEL-NEXT: v_or_b32_e32 v11, v14, v15
-; GISEL-NEXT: v_and_b32_e32 v14, 1, v11
-; GISEL-NEXT: v_or_b32_e32 v10, v11, v10
+; GISEL-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[12:13]
+; GISEL-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc
+; GISEL-NEXT: v_or_b32_e32 v13, v14, v15
+; GISEL-NEXT: v_and_b32_e32 v14, 1, v13
+; GISEL-NEXT: v_or_b32_e32 v12, v13, v12
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GISEL-NEXT: v_cndmask_b32_e64 v14, v6, 0, vcc
-; GISEL-NEXT: v_and_b32_e32 v16, 1, v10
+; GISEL-NEXT: v_and_b32_e32 v16, 1, v12
; GISEL-NEXT: v_cndmask_b32_e64 v15, v7, 0, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v10, v12, 0, vcc
-; GISEL-NEXT: v_cndmask_b32_e64 v11, v13, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v12, v10, 0, vcc
+; GISEL-NEXT: v_cndmask_b32_e64 v13, v11, 0, vcc
; GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
; GISEL-NEXT: s_and_saveexec_b64 s[12:13], s[4:5]
@@ -703,22 +703,22 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_addc_u32_e64 v28, vcc, 0, v0, s[4:5]
; GISEL-NEXT: v_addc_u32_e32 v29, vcc, 0, v1, vcc
; GISEL-NEXT: v_add_i32_e64 v14, s[4:5], v30, v2
-; GISEL-NEXT: v_sub_i32_e64 v10, s[4:5], 64, v30
+; GISEL-NEXT: v_sub_i32_e64 v12, s[4:5], 64, v30
; GISEL-NEXT: v_lshl_b64 v[0:1], v[6:7], v30
-; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], v30
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], v30
; GISEL-NEXT: s_xor_b64 s[4:5], vcc, -1
-; GISEL-NEXT: v_lshr_b64 v[10:11], v[6:7], v10
+; GISEL-NEXT: v_lshr_b64 v[12:13], v[6:7], v12
; GISEL-NEXT: v_lshl_b64 v[16:17], v[6:7], v14
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v30
; GISEL-NEXT: v_cndmask_b32_e32 v14, 0, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v15, 0, v1, vcc
-; GISEL-NEXT: v_or_b32_e32 v0, v10, v2
-; GISEL-NEXT: v_or_b32_e32 v1, v11, v3
+; GISEL-NEXT: v_or_b32_e32 v0, v12, v2
+; GISEL-NEXT: v_or_b32_e32 v1, v13, v3
; GISEL-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v30
-; GISEL-NEXT: v_cndmask_b32_e32 v10, v0, v12, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v11, v1, v13, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v12, v0, v10, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v13, v1, v11, vcc
; GISEL-NEXT: s_mov_b64 s[10:11], s[8:9]
; GISEL-NEXT: v_mov_b32_e32 v0, s8
; GISEL-NEXT: v_mov_b32_e32 v1, s9
@@ -730,26 +730,26 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: ; %bb.8: ; %udiv-preheader
; GISEL-NEXT: v_add_i32_e32 v32, vcc, 0xffffffc0, v26
; GISEL-NEXT: v_sub_i32_e32 v16, vcc, 64, v26
-; GISEL-NEXT: v_lshr_b64 v[0:1], v[12:13], v26
+; GISEL-NEXT: v_lshr_b64 v[0:1], v[10:11], v26
; GISEL-NEXT: v_lshr_b64 v[2:3], v[6:7], v26
; GISEL-NEXT: s_mov_b64 s[4:5], 0
; GISEL-NEXT: v_add_i32_e32 v30, vcc, -1, v20
; GISEL-NEXT: v_addc_u32_e32 v31, vcc, -1, v21, vcc
-; GISEL-NEXT: v_lshl_b64 v[16:17], v[12:13], v16
-; GISEL-NEXT: v_lshr_b64 v[12:13], v[12:13], v32
+; GISEL-NEXT: v_lshl_b64 v[16:17], v[10:11], v16
+; GISEL-NEXT: v_lshr_b64 v[10:11], v[10:11], v32
; GISEL-NEXT: v_addc_u32_e32 v32, vcc, -1, v4, vcc
; GISEL-NEXT: v_addc_u32_e32 v33, vcc, -1, v5, vcc
; GISEL-NEXT: s_mov_b64 s[6:7], s[4:5]
; GISEL-NEXT: v_or_b32_e32 v2, v2, v16
; GISEL-NEXT: v_or_b32_e32 v3, v3, v17
; GISEL-NEXT: v_cmp_gt_u32_e32 vcc, 64, v26
-; GISEL-NEXT: v_cndmask_b32_e32 v2, v12, v2, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v3, v13, v3, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v16, 0, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v17, 0, v1, vcc
; GISEL-NEXT: v_cmp_eq_u32_e32 vcc, 0, v26
-; GISEL-NEXT: v_cndmask_b32_e32 v12, v2, v6, vcc
-; GISEL-NEXT: v_cndmask_b32_e32 v13, v3, v7, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v10, v2, v6, vcc
+; GISEL-NEXT: v_cndmask_b32_e32 v11, v3, v7, vcc
; GISEL-NEXT: v_mov_b32_e32 v7, 0
; GISEL-NEXT: v_mov_b32_e32 v0, s4
; GISEL-NEXT: v_mov_b32_e32 v1, s5
@@ -757,20 +757,20 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_mov_b32_e32 v3, s7
; GISEL-NEXT: .LBB0_9: ; %udiv-do-while
; GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT: v_lshl_b64 v[2:3], v[12:13], 1
+; GISEL-NEXT: v_lshl_b64 v[2:3], v[10:11], 1
; GISEL-NEXT: v_lshl_b64 v[16:17], v[16:17], 1
-; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v13
-; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v11
-; GISEL-NEXT: v_lshl_b64 v[12:13], v[14:15], 1
-; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshrrev_b32_e32 v6, 31, v11
+; GISEL-NEXT: v_lshrrev_b32_e32 v34, 31, v13
+; GISEL-NEXT: v_lshl_b64 v[10:11], v[14:15], 1
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v14, 31, v15
; GISEL-NEXT: v_add_i32_e32 v26, vcc, -1, v26
; GISEL-NEXT: v_addc_u32_e32 v27, vcc, -1, v27, vcc
; GISEL-NEXT: v_or_b32_e32 v16, v16, v6
; GISEL-NEXT: v_or_b32_e32 v2, v2, v34
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v14
-; GISEL-NEXT: v_or_b32_e32 v14, v0, v12
-; GISEL-NEXT: v_or_b32_e32 v15, v1, v13
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v14
+; GISEL-NEXT: v_or_b32_e32 v14, v0, v10
+; GISEL-NEXT: v_or_b32_e32 v15, v1, v11
; GISEL-NEXT: v_addc_u32_e32 v28, vcc, -1, v28, vcc
; GISEL-NEXT: v_addc_u32_e32 v29, vcc, -1, v29, vcc
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v30, v2
@@ -783,14 +783,14 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_ashrrev_i32_e32 v0, 31, v6
; GISEL-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GISEL-NEXT: v_and_b32_e32 v6, 1, v0
-; GISEL-NEXT: v_and_b32_e32 v12, v0, v20
-; GISEL-NEXT: v_and_b32_e32 v13, v0, v21
+; GISEL-NEXT: v_and_b32_e32 v10, v0, v20
+; GISEL-NEXT: v_and_b32_e32 v11, v0, v21
; GISEL-NEXT: v_and_b32_e32 v34, v0, v4
; GISEL-NEXT: v_and_b32_e32 v35, v0, v5
; GISEL-NEXT: v_mov_b32_e32 v0, v6
; GISEL-NEXT: v_mov_b32_e32 v1, v7
-; GISEL-NEXT: v_sub_i32_e32 v12, vcc, v2, v12
-; GISEL-NEXT: v_subb_u32_e32 v13, vcc, v3, v13, vcc
+; GISEL-NEXT: v_sub_i32_e32 v10, vcc, v2, v10
+; GISEL-NEXT: v_subb_u32_e32 v11, vcc, v3, v11, vcc
; GISEL-NEXT: v_subb_u32_e32 v16, vcc, v16, v34, vcc
; GISEL-NEXT: v_subb_u32_e32 v17, vcc, v17, v35, vcc
; GISEL-NEXT: s_andn2_b64 exec, exec, s[4:5]
@@ -800,9 +800,9 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: .LBB0_11: ; %Flow11
; GISEL-NEXT: s_or_b64 exec, exec, s[8:9]
; GISEL-NEXT: v_lshl_b64 v[2:3], v[14:15], 1
-; GISEL-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GISEL-NEXT: v_lshl_b64 v[12:13], v[12:13], 1
; GISEL-NEXT: v_lshrrev_b32_e32 v4, 31, v15
-; GISEL-NEXT: v_or_b32_e32 v10, v10, v4
+; GISEL-NEXT: v_or_b32_e32 v12, v12, v4
; GISEL-NEXT: v_or_b32_e32 v14, v0, v2
; GISEL-NEXT: v_or_b32_e32 v15, v1, v3
; GISEL-NEXT: .LBB0_12: ; %Flow12
@@ -815,8 +815,8 @@ define <2 x i128> @v_sdiv_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GISEL-NEXT: v_xor_b32_e32 v6, v9, v3
; GISEL-NEXT: v_xor_b32_e32 v4, v14, v7
; GISEL-NEXT: v_xor_b32_e32 v5, v15, v7
-; GISEL-NEXT: v_xor_b32_e32 v8, v10, v7
-; GISEL-NEXT: v_xor_b32_e32 v9, v11, v7
+; GISEL-NEXT: v_xor_b32_e32 v8, v12, v7
+; GISEL-NEXT: v_xor_b32_e32 v9, v13, v7
; GISEL-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
; GISEL-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
; GISEL-NEXT: v_sub_i32_e64 v4, s[4:5], v4, v7
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index e7af746..e042157 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -20,7 +20,8 @@ define i128 @fptosi_f64_to_i128(double %x) {
; SDAG-NEXT: s_cbranch_execz .LBB0_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -386,7 +387,8 @@ define i128 @fptoui_f64_to_i128(double %x) {
; SDAG-NEXT: s_cbranch_execz .LBB1_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -749,9 +751,10 @@ define i128 @fptosi_f32_to_i128(float %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB2_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1100,9 +1103,10 @@ define i128 @fptoui_f32_to_i128(float %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB3_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1489,9 +1493,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB6_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1836,9 +1841,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
; SDAG-NEXT: s_and_saveexec_b64 s[8:9], vcc
; SDAG-NEXT: s_cbranch_execz .LBB7_10
; SDAG-NEXT: ; %bb.1: ; %fp-to-i-if-end
-; SDAG-NEXT: v_mov_b32_e32 v6, 0
; SDAG-NEXT: v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
+; SDAG-NEXT: v_mov_b32_e32 v1, -1
+; SDAG-NEXT: v_mov_b32_e32 v6, 0
+; SDAG-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; SDAG-NEXT: v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
; SDAG-NEXT: s_movk_i32 s6, 0xff7f
; SDAG-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 05403f0..a50791e 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -7575,15 +7575,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB38_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -7593,7 +7591,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB38_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7609,15 +7609,13 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB38_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -7628,7 +7626,9 @@ define double @global_agent_atomic_fadd_ret_f64__amdgpu_no_fine_grained_memory(p
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB38_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7809,15 +7809,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB39_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -7827,7 +7825,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB39_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7843,15 +7843,13 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB39_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -7862,7 +7860,9 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_pos__amdgpu_no_fine_g
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB39_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8039,34 +8039,32 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: s_movk_i32 s4, 0xf800
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
; GFX7-NEXT: s_mov_b32 s5, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
-; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: .LBB40_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB40_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8077,35 +8075,33 @@ define double @global_agent_atomic_fadd_ret_f64__offset12b_neg__amdgpu_no_fine_g
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: s_movk_i32 s4, 0xf800
-; GFX6-NEXT: v_mov_b32_e32 v7, v1
-; GFX6-NEXT: v_mov_b32_e32 v6, v0
; GFX6-NEXT: s_mov_b32 s5, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
; GFX6-NEXT: .LBB40_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB40_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
index ac223fd..311faac 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmax.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB24_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmax_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT: v_max_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT: v_max_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB24_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
index 5653f85..e2808ee 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fmin.ll
@@ -4203,25 +4203,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX7-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX7-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
+; GFX7-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX7-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB24_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4237,26 +4237,25 @@ define double @global_agent_atomic_fmin_ret_f64__amdgpu_no_remote_memory(ptr add
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
-; GFX6-NEXT: v_max_f64 v[6:7], v[2:3], v[2:3]
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[4:5], s[4:7], 0 addr64
+; GFX6-NEXT: v_max_f64 v[10:11], v[2:3], v[2:3]
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB24_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_max_f64 v[0:1], v[10:11], v[10:11]
-; GFX6-NEXT: v_min_f64 v[8:9], v[0:1], v[6:7]
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GFX6-NEXT: v_max_f64 v[0:1], v[8:9], v[8:9]
+; GFX6-NEXT: v_min_f64 v[6:7], v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB24_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index f0e1615..11f0f38 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -3913,15 +3913,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -3931,7 +3929,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3947,15 +3947,13 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -3966,7 +3964,9 @@ define double @global_agent_atomic_fsub_ret_f64(ptr addrspace(1) %ptr, double %v
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4165,15 +4165,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX7-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX7-NEXT: v_mov_b32_e32 v0, v8
; GFX7-NEXT: v_mov_b32_e32 v1, v9
@@ -4183,7 +4181,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_mov_b32_e32 v11, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v10, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4199,15 +4199,13 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:2040
+; GFX6-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:2040
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, v8
@@ -4218,7 +4216,9 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_pos(ptr addrspace(1)
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_mov_b32_e32 v11, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v10, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4413,34 +4413,32 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: s_movk_i32 s4, 0xf800
-; GFX7-NEXT: v_mov_b32_e32 v7, v1
-; GFX7-NEXT: v_mov_b32_e32 v6, v0
; GFX7-NEXT: s_mov_b32 s5, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_mov_b32 s6, 0
-; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX7-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX7-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX7-NEXT: v_mov_b32_e32 v5, v3
; GFX7-NEXT: v_mov_b32_e32 v4, v2
-; GFX7-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX7-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: s_mov_b32 s4, s6
; GFX7-NEXT: s_mov_b32 s5, s6
; GFX7-NEXT: .LBB18_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_mov_b32_e32 v11, v1
-; GFX7-NEXT: v_mov_b32_e32 v10, v0
-; GFX7-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v0, v8
-; GFX7-NEXT: v_mov_b32_e32 v1, v9
-; GFX7-NEXT: v_mov_b32_e32 v2, v10
-; GFX7-NEXT: v_mov_b32_e32 v3, v11
-; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX7-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5]
+; GFX7-NEXT: v_mov_b32_e32 v0, v6
+; GFX7-NEXT: v_mov_b32_e32 v1, v7
+; GFX7-NEXT: v_mov_b32_e32 v2, v8
+; GFX7-NEXT: v_mov_b32_e32 v3, v9
+; GFX7-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v9, v1
; GFX7-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX7-NEXT: v_mov_b32_e32 v8, v0
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB18_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4451,35 +4449,33 @@ define double @global_agent_atomic_fsub_ret_f64__offset12b_neg(ptr addrspace(1)
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: s_movk_i32 s4, 0xf800
-; GFX6-NEXT: v_mov_b32_e32 v7, v1
-; GFX6-NEXT: v_mov_b32_e32 v6, v0
; GFX6-NEXT: s_mov_b32 s5, -1
; GFX6-NEXT: s_mov_b32 s7, 0xf000
; GFX6-NEXT: s_mov_b32 s6, 0
-; GFX6-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
-; GFX6-NEXT: v_add_i32_e32 v6, vcc, 0xfffff800, v6
+; GFX6-NEXT: buffer_load_dwordx2 v[8:9], v[0:1], s[4:7], 0 addr64
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, 0xfffff800, v0
; GFX6-NEXT: v_mov_b32_e32 v5, v3
; GFX6-NEXT: v_mov_b32_e32 v4, v2
-; GFX6-NEXT: v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GFX6-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: s_mov_b32 s4, s6
; GFX6-NEXT: s_mov_b32 s5, s6
; GFX6-NEXT: .LBB18_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt vmcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v11, v1
-; GFX6-NEXT: v_mov_b32_e32 v10, v0
-; GFX6-NEXT: v_add_f64 v[8:9], v[10:11], -v[4:5]
+; GFX6-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5]
; GFX6-NEXT: s_waitcnt expcnt(0)
-; GFX6-NEXT: v_mov_b32_e32 v0, v8
-; GFX6-NEXT: v_mov_b32_e32 v1, v9
-; GFX6-NEXT: v_mov_b32_e32 v2, v10
-; GFX6-NEXT: v_mov_b32_e32 v3, v11
-; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; GFX6-NEXT: v_mov_b32_e32 v0, v6
+; GFX6-NEXT: v_mov_b32_e32 v1, v7
+; GFX6-NEXT: v_mov_b32_e32 v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v3, v9
+; GFX6-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[10:11], s[4:7], 0 addr64 glc
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: buffer_wbinvl1
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v9, v1
; GFX6-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GFX6-NEXT: v_mov_b32_e32 v8, v0
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB18_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index 74f0f64..6a4c284 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -1502,13 +1502,11 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB32_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7
; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
@@ -1521,6 +1519,8 @@ define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB32_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1593,13 +1593,11 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB33_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_sub_i32_e32 v8, vcc, v10, v7
; SI-NEXT: v_subb_u32_e32 v9, vcc, v11, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
@@ -1612,6 +1610,8 @@ define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB33_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1883,43 +1883,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v9, s6, 0
-; SI-NEXT: v_writelane_b32 v9, s7, 1
+; SI-NEXT: v_writelane_b32 v7, s6, 0
+; SI-NEXT: v_writelane_b32 v7, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
+; SI-NEXT: v_mov_b32_e32 v6, s35
; SI-NEXT: .LBB36_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v8, v1
-; SI-NEXT: v_mov_b32_e32 v7, v0
-; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v5
-; SI-NEXT: v_mov_b32_e32 v1, v6
-; SI-NEXT: v_mov_b32_e32 v2, v7
-; SI-NEXT: v_mov_b32_e32 v3, v8
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB36_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v9, 1
-; SI-NEXT: v_readlane_b32 s6, v9, 0
+; SI-NEXT: v_readlane_b32 s7, v7, 1
+; SI-NEXT: v_readlane_b32 s6, v7, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -1985,43 +1984,42 @@ define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v9, s6, 0
-; SI-NEXT: v_writelane_b32 v9, s7, 1
+; SI-NEXT: v_writelane_b32 v7, s6, 0
+; SI-NEXT: v_writelane_b32 v7, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
+; SI-NEXT: v_mov_b32_e32 v6, s35
; SI-NEXT: .LBB37_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v8, v1
-; SI-NEXT: v_mov_b32_e32 v7, v0
-; SI-NEXT: v_subrev_i32_e32 v5, vcc, s34, v7
-; SI-NEXT: v_subb_u32_e32 v6, vcc, v8, v4, vcc
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v5
-; SI-NEXT: v_mov_b32_e32 v1, v6
-; SI-NEXT: v_mov_b32_e32 v2, v7
-; SI-NEXT: v_mov_b32_e32 v3, v8
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_subrev_i32_e32 v2, vcc, s34, v4
+; SI-NEXT: v_subb_u32_e32 v3, vcc, v5, v6, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB37_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v9, 1
-; SI-NEXT: v_readlane_b32 s6, v9, 0
+; SI-NEXT: v_readlane_b32 s7, v7, 1
+; SI-NEXT: v_readlane_b32 s6, v7, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -2342,13 +2340,11 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB42_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_and_b32_e32 v9, v11, v6
; SI-NEXT: v_and_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -2361,6 +2357,8 @@ define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB42_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2433,13 +2431,11 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB43_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_and_b32_e32 v9, v11, v6
; SI-NEXT: v_and_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -2452,6 +2448,8 @@ define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB43_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2726,14 +2724,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB46_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, s34, v5
; SI-NEXT: v_and_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -2745,6 +2740,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB46_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2825,14 +2822,11 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB47_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, s34, v5
; SI-NEXT: v_and_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -2844,6 +2838,8 @@ define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB47_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3182,14 +3178,11 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB52_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3203,6 +3196,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB52_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3279,14 +3274,11 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB53_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3300,6 +3292,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB53_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3590,14 +3584,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB56_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
@@ -3611,6 +3602,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB56_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3695,14 +3688,11 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB57_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
@@ -3716,6 +3706,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB57_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3891,14 +3883,11 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB59_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
@@ -3912,6 +3901,8 @@ define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrs
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB59_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4162,13 +4153,11 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB62_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_or_b32_e32 v9, v11, v6
; SI-NEXT: v_or_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -4181,6 +4170,8 @@ define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB62_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4253,13 +4244,11 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB63_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_or_b32_e32 v9, v11, v6
; SI-NEXT: v_or_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -4272,6 +4261,8 @@ define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB63_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4546,14 +4537,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB66_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_or_b32_e32 v3, s34, v5
; SI-NEXT: v_or_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -4565,6 +4553,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %p
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB66_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4645,14 +4635,11 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB67_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_or_b32_e32 v3, s34, v5
; SI-NEXT: v_or_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -4664,6 +4651,8 @@ define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB67_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4990,13 +4979,11 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB72_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_xor_b32_e32 v9, v11, v6
; SI-NEXT: v_xor_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -5009,6 +4996,8 @@ define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB72_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5081,13 +5070,11 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB73_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_xor_b32_e32 v9, v11, v6
; SI-NEXT: v_xor_b32_e32 v8, v10, v7
; SI-NEXT: s_waitcnt expcnt(0)
@@ -5100,6 +5087,8 @@ define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB73_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5374,14 +5363,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB76_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, s34, v5
; SI-NEXT: v_xor_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -5393,6 +5379,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB76_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5473,14 +5461,11 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB77_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, s34, v5
; SI-NEXT: v_xor_b32_e32 v2, s35, v4
; SI-NEXT: v_mov_b32_e32 v0, v2
@@ -5492,6 +5477,8 @@ define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB77_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5824,13 +5811,11 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB82_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5844,6 +5829,8 @@ define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB82_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5918,13 +5905,11 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB83_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -5938,6 +5923,8 @@ define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB83_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6223,45 +6210,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB86_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB86_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -6331,45 +6318,45 @@ define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB87_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB87_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -7176,13 +7163,11 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB96_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7196,6 +7181,8 @@ define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB96_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7270,13 +7257,11 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB97_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -7290,6 +7275,8 @@ define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB97_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7575,45 +7562,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB100_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB100_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -7683,45 +7670,45 @@ define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB101_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB101_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -8416,13 +8403,11 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB109_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8436,6 +8421,8 @@ define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB109_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8510,13 +8497,11 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB110_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -8530,6 +8515,8 @@ define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB110_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8815,45 +8802,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB113_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB113_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -8923,45 +8910,45 @@ define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB114_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB114_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -9292,13 +9279,11 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB119_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -9312,6 +9297,8 @@ define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB119_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9386,13 +9373,11 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB120_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
@@ -9406,6 +9391,8 @@ define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB120_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9691,45 +9678,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB123_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB123_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -9799,45 +9786,45 @@ define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB124_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
-; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
+; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v7, v4, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB124_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -10645,14 +10632,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB133_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc
; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5]
@@ -10667,6 +10651,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB133_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -10745,14 +10731,11 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB134_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v11, vcc
; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[10:11], v[4:5]
@@ -10767,6 +10750,8 @@ define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB134_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11065,14 +11050,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB137_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5]
@@ -11087,6 +11069,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB137_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11173,14 +11157,11 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB138_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v5, v1
-; SI-NEXT: v_mov_b32_e32 v4, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[34:35], v[4:5]
@@ -11195,6 +11176,8 @@ define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspa
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB138_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11557,14 +11540,11 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB143_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc
; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
@@ -11581,6 +11561,8 @@ define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB143_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -11663,14 +11645,11 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
-; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[8:11], 0 addr64 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[10:11], v[6:7], s[8:11], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB144_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v11, v1
-; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v10
; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v11, vcc
; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
@@ -11687,6 +11666,8 @@ define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %i
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; SI-NEXT: v_mov_b32_e32 v11, v1
+; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB144_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -12004,49 +11985,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[38:39], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB147_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8
-; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9]
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5]
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
-; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB147_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -12124,49 +12104,48 @@ define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspa
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v10, s6, 0
-; SI-NEXT: v_writelane_b32 v10, s7, 1
+; SI-NEXT: v_writelane_b32 v8, s6, 0
+; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
+; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[38:39], 0
-; SI-NEXT: v_mov_b32_e32 v4, s35
-; SI-NEXT: v_mov_b32_e32 v5, s34
+; SI-NEXT: v_mov_b32_e32 v6, s35
+; SI-NEXT: v_mov_b32_e32 v7, s34
; SI-NEXT: .LBB148_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v9, v1
-; SI-NEXT: v_mov_b32_e32 v8, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v8
-; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v9, vcc
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[8:9]
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, -1, v5, vcc
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; SI-NEXT: v_cmp_lt_u64_e64 s[36:37], s[34:35], v[4:5]
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
-; SI-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc
-; SI-NEXT: v_cndmask_b32_e32 v6, v0, v5, vcc
-; SI-NEXT: v_mov_b32_e32 v0, v6
-; SI-NEXT: v_mov_b32_e32 v1, v7
-; SI-NEXT: v_mov_b32_e32 v2, v8
-; SI-NEXT: v_mov_b32_e32 v3, v9
+; SI-NEXT: v_cndmask_b32_e32 v3, v1, v6, vcc
+; SI-NEXT: v_cndmask_b32_e32 v2, v0, v7, vcc
+; SI-NEXT: v_mov_b32_e32 v0, v2
+; SI-NEXT: v_mov_b32_e32 v1, v3
+; SI-NEXT: v_mov_b32_e32 v2, v4
+; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
-; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
+; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
+; SI-NEXT: v_mov_b32_e32 v5, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB148_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
-; SI-NEXT: v_readlane_b32 s7, v10, 1
-; SI-NEXT: v_readlane_b32 s6, v10, 0
+; SI-NEXT: v_readlane_b32 s7, v8, 1
+; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
index 9b4539c..10d61de 100644
--- a/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
+++ b/llvm/test/CodeGen/AMDGPU/identical-subrange-spill-infloop.ll
@@ -6,96 +6,134 @@ define void @main(i1 %arg) #0 {
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; CHECK-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
-; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
+; CHECK-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
-; CHECK-NEXT: v_writelane_b32 v5, s30, 0
-; CHECK-NEXT: v_writelane_b32 v5, s31, 1
-; CHECK-NEXT: v_writelane_b32 v5, s36, 2
-; CHECK-NEXT: v_writelane_b32 v5, s37, 3
-; CHECK-NEXT: v_writelane_b32 v5, s38, 4
-; CHECK-NEXT: v_writelane_b32 v5, s39, 5
-; CHECK-NEXT: v_writelane_b32 v5, s48, 6
-; CHECK-NEXT: v_writelane_b32 v5, s49, 7
-; CHECK-NEXT: v_writelane_b32 v5, s50, 8
-; CHECK-NEXT: v_writelane_b32 v5, s51, 9
-; CHECK-NEXT: v_writelane_b32 v5, s52, 10
-; CHECK-NEXT: v_writelane_b32 v5, s53, 11
-; CHECK-NEXT: v_writelane_b32 v5, s54, 12
-; CHECK-NEXT: v_writelane_b32 v5, s55, 13
-; CHECK-NEXT: s_getpc_b64 s[24:25]
-; CHECK-NEXT: v_writelane_b32 v5, s64, 14
-; CHECK-NEXT: s_movk_i32 s4, 0xf0
-; CHECK-NEXT: s_mov_b32 s5, s24
-; CHECK-NEXT: v_writelane_b32 v5, s65, 15
-; CHECK-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x0
-; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: v_writelane_b32 v5, s66, 16
-; CHECK-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
-; CHECK-NEXT: v_writelane_b32 v5, s67, 17
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_movk_i32 s6, 0x130
-; CHECK-NEXT: s_mov_b32 s7, s24
-; CHECK-NEXT: v_writelane_b32 v5, s68, 18
-; CHECK-NEXT: s_load_dwordx16 s[36:51], s[6:7], 0x0
-; CHECK-NEXT: v_writelane_b32 v5, s69, 19
-; CHECK-NEXT: v_writelane_b32 v5, s70, 20
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_writelane_b32 v6, s36, 2
+; CHECK-NEXT: v_writelane_b32 v6, s37, 3
+; CHECK-NEXT: v_writelane_b32 v6, s38, 4
+; CHECK-NEXT: v_writelane_b32 v6, s39, 5
+; CHECK-NEXT: v_writelane_b32 v6, s48, 6
+; CHECK-NEXT: v_writelane_b32 v6, s49, 7
+; CHECK-NEXT: v_writelane_b32 v6, s50, 8
+; CHECK-NEXT: v_writelane_b32 v6, s51, 9
+; CHECK-NEXT: v_writelane_b32 v6, s52, 10
+; CHECK-NEXT: v_writelane_b32 v6, s53, 11
+; CHECK-NEXT: v_writelane_b32 v6, s54, 12
+; CHECK-NEXT: v_writelane_b32 v6, s55, 13
+; CHECK-NEXT: v_writelane_b32 v6, s64, 14
+; CHECK-NEXT: v_writelane_b32 v6, s65, 15
+; CHECK-NEXT: v_writelane_b32 v6, s66, 16
+; CHECK-NEXT: v_writelane_b32 v6, s67, 17
+; CHECK-NEXT: v_writelane_b32 v6, s68, 18
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_mov_b64 s[8:9], 0
+; CHECK-NEXT: v_writelane_b32 v6, s69, 19
; CHECK-NEXT: s_mov_b32 s68, 0
-; CHECK-NEXT: v_writelane_b32 v5, s71, 21
+; CHECK-NEXT: s_mov_b32 s69, s4
+; CHECK-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x0
+; CHECK-NEXT: s_load_dwordx8 s[24:31], s[68:69], 0x30
+; CHECK-NEXT: s_load_dwordx16 s[52:67], s[68:69], 0xf0
+; CHECK-NEXT: ; kill: killed $sgpr8_sgpr9
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x130
+; CHECK-NEXT: ; implicit-def: $vgpr7 : SGPR spill to VGPR lane
+; CHECK-NEXT: v_writelane_b32 v6, s70, 20
+; CHECK-NEXT: v_writelane_b32 v6, s71, 21
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: v_writelane_b32 v7, s8, 0
+; CHECK-NEXT: v_writelane_b32 v7, s9, 1
+; CHECK-NEXT: v_writelane_b32 v7, s10, 2
+; CHECK-NEXT: v_writelane_b32 v7, s11, 3
+; CHECK-NEXT: v_writelane_b32 v7, s12, 4
+; CHECK-NEXT: v_writelane_b32 v7, s13, 5
+; CHECK-NEXT: v_writelane_b32 v7, s14, 6
+; CHECK-NEXT: v_writelane_b32 v7, s15, 7
+; CHECK-NEXT: v_writelane_b32 v7, s16, 8
+; CHECK-NEXT: v_writelane_b32 v7, s17, 9
+; CHECK-NEXT: v_writelane_b32 v7, s18, 10
+; CHECK-NEXT: v_writelane_b32 v7, s19, 11
+; CHECK-NEXT: v_writelane_b32 v7, s20, 12
+; CHECK-NEXT: v_writelane_b32 v7, s21, 13
+; CHECK-NEXT: v_writelane_b32 v7, s22, 14
+; CHECK-NEXT: v_writelane_b32 v7, s23, 15
+; CHECK-NEXT: v_writelane_b32 v7, s52, 16
+; CHECK-NEXT: v_writelane_b32 v7, s53, 17
+; CHECK-NEXT: v_writelane_b32 v7, s54, 18
+; CHECK-NEXT: v_writelane_b32 v7, s55, 19
+; CHECK-NEXT: v_writelane_b32 v7, s56, 20
+; CHECK-NEXT: v_writelane_b32 v7, s57, 21
+; CHECK-NEXT: v_writelane_b32 v7, s58, 22
+; CHECK-NEXT: v_writelane_b32 v7, s59, 23
+; CHECK-NEXT: v_writelane_b32 v7, s60, 24
+; CHECK-NEXT: v_writelane_b32 v7, s61, 25
+; CHECK-NEXT: v_writelane_b32 v7, s62, 26
+; CHECK-NEXT: v_writelane_b32 v7, s63, 27
+; CHECK-NEXT: v_writelane_b32 v7, s64, 28
+; CHECK-NEXT: v_writelane_b32 v7, s65, 29
+; CHECK-NEXT: v_writelane_b32 v7, s66, 30
+; CHECK-NEXT: s_load_dwordx16 s[8:23], s[68:69], 0x1f0
+; CHECK-NEXT: s_load_dwordx16 s[36:51], s[68:69], 0x2f0
; CHECK-NEXT: s_mov_b32 s69, s68
; CHECK-NEXT: s_mov_b32 s70, s68
; CHECK-NEXT: s_mov_b32 s71, s68
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1
+; CHECK-NEXT: v_writelane_b32 v7, s67, 31
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s52, v7, 0
; CHECK-NEXT: v_mov_b32_e32 v1, v2
-; CHECK-NEXT: ; implicit-def: $vgpr6 : SGPR spill to VGPR lane
-; CHECK-NEXT: s_mov_b32 s6, 48
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_writelane_b32 v6, s36, 0
-; CHECK-NEXT: v_writelane_b32 v6, s37, 1
-; CHECK-NEXT: v_writelane_b32 v6, s38, 2
-; CHECK-NEXT: v_writelane_b32 v6, s39, 3
-; CHECK-NEXT: v_writelane_b32 v6, s40, 4
-; CHECK-NEXT: v_writelane_b32 v6, s41, 5
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[68:71] dmask:0x1
-; CHECK-NEXT: v_writelane_b32 v6, s42, 6
-; CHECK-NEXT: v_writelane_b32 v6, s43, 7
-; CHECK-NEXT: v_writelane_b32 v6, s44, 8
-; CHECK-NEXT: v_writelane_b32 v6, s45, 9
-; CHECK-NEXT: v_writelane_b32 v6, s46, 10
-; CHECK-NEXT: v_writelane_b32 v6, s47, 11
-; CHECK-NEXT: v_writelane_b32 v6, s48, 12
-; CHECK-NEXT: v_writelane_b32 v6, s49, 13
-; CHECK-NEXT: v_writelane_b32 v6, s50, 14
-; CHECK-NEXT: s_movk_i32 s56, 0x1f0
-; CHECK-NEXT: s_movk_i32 s72, 0x2f0
-; CHECK-NEXT: s_mov_b32 s57, s24
-; CHECK-NEXT: s_mov_b32 s73, s24
-; CHECK-NEXT: v_writelane_b32 v6, s51, 15
-; CHECK-NEXT: s_load_dwordx8 s[24:31], s[6:7], 0x0
-; CHECK-NEXT: s_load_dwordx16 s[36:51], s[56:57], 0x0
-; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
-; CHECK-NEXT: s_load_dwordx16 s[52:67], s[72:73], 0x0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v0
+; CHECK-NEXT: v_readlane_b32 s53, v7, 1
+; CHECK-NEXT: v_readlane_b32 s54, v7, 2
+; CHECK-NEXT: v_readlane_b32 s55, v7, 3
+; CHECK-NEXT: v_readlane_b32 s56, v7, 4
+; CHECK-NEXT: v_readlane_b32 s57, v7, 5
+; CHECK-NEXT: v_readlane_b32 s58, v7, 6
+; CHECK-NEXT: v_readlane_b32 s59, v7, 7
+; CHECK-NEXT: v_and_b32_e32 v5, 1, v0
+; CHECK-NEXT: v_cmp_ne_u32_e64 s[4:5], 1, v5
+; CHECK-NEXT: v_readlane_b32 s60, v7, 8
+; CHECK-NEXT: v_readlane_b32 s61, v7, 9
+; CHECK-NEXT: v_readlane_b32 s62, v7, 10
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[68:71] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s63, v7, 11
+; CHECK-NEXT: v_readlane_b32 s64, v7, 12
+; CHECK-NEXT: v_readlane_b32 s65, v7, 13
+; CHECK-NEXT: v_readlane_b32 s66, v7, 14
+; CHECK-NEXT: v_readlane_b32 s67, v7, 15
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_mul_f32_e32 v0, v4, v3
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; CHECK-NEXT: s_xor_b64 s[6:7], exec, s[6:7]
; CHECK-NEXT: s_cbranch_execz .LBB0_3
; CHECK-NEXT: ; %bb.1: ; %bb48
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[16:23], s[68:71] dmask:0x1
-; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: v_readlane_b32 s52, v7, 16
+; CHECK-NEXT: v_readlane_b32 s60, v7, 24
+; CHECK-NEXT: v_readlane_b32 s61, v7, 25
+; CHECK-NEXT: v_readlane_b32 s62, v7, 26
+; CHECK-NEXT: v_readlane_b32 s63, v7, 27
+; CHECK-NEXT: v_readlane_b32 s64, v7, 28
+; CHECK-NEXT: v_readlane_b32 s65, v7, 29
+; CHECK-NEXT: v_readlane_b32 s66, v7, 30
+; CHECK-NEXT: v_readlane_b32 s67, v7, 31
; CHECK-NEXT: s_and_b64 vcc, exec, -1
+; CHECK-NEXT: v_readlane_b32 s53, v7, 17
+; CHECK-NEXT: v_readlane_b32 s54, v7, 18
+; CHECK-NEXT: v_readlane_b32 s55, v7, 19
+; CHECK-NEXT: v_readlane_b32 s56, v7, 20
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: v_readlane_b32 s57, v7, 21
+; CHECK-NEXT: v_readlane_b32 s58, v7, 22
+; CHECK-NEXT: v_readlane_b32 s59, v7, 23
; CHECK-NEXT: .LBB0_2: ; %bb50
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_mov_b32 s69, s68
-; CHECK-NEXT: s_mov_b32 s70, s68
-; CHECK-NEXT: s_mov_b32 s71, s68
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[44:51], s[28:31] dmask:0x1
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[16:23], s[28:31] dmask:0x1
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: image_sample_lz v1, v[1:2], s[60:67], s[68:71] dmask:0x1
+; CHECK-NEXT: image_sample_lz v1, v[1:2], s[44:51], s[68:71] dmask:0x1
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_sub_f32_e32 v1, v1, v4
; CHECK-NEXT: v_mul_f32_e32 v1, v1, v0
@@ -103,60 +141,75 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: s_mov_b64 vcc, vcc
; CHECK-NEXT: s_cbranch_vccnz .LBB0_2
; CHECK-NEXT: .LBB0_3: ; %Flow14
-; CHECK-NEXT: s_andn2_saveexec_b64 s[20:21], s[6:7]
+; CHECK-NEXT: s_andn2_saveexec_b64 s[6:7], s[6:7]
; CHECK-NEXT: s_cbranch_execz .LBB0_10
; CHECK-NEXT: ; %bb.4: ; %bb32
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_and_saveexec_b64 s[16:17], s[4:5]
-; CHECK-NEXT: s_xor_b64 s[22:23], exec, s[16:17]
+; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[16:17]
; CHECK-NEXT: s_cbranch_execz .LBB0_6
; CHECK-NEXT: ; %bb.5: ; %bb43
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_mov_b32 s44, 0
-; CHECK-NEXT: s_mov_b32 s45, s44
-; CHECK-NEXT: v_mov_b32_e32 v2, s44
-; CHECK-NEXT: v_mov_b32_e32 v3, s45
-; CHECK-NEXT: s_mov_b32 s46, s44
-; CHECK-NEXT: s_mov_b32 s47, s44
-; CHECK-NEXT: image_sample_lz v1, v[2:3], s[8:15], s[44:47] dmask:0x1
-; CHECK-NEXT: v_readlane_b32 s4, v6, 0
-; CHECK-NEXT: v_readlane_b32 s12, v6, 8
-; CHECK-NEXT: v_readlane_b32 s13, v6, 9
-; CHECK-NEXT: v_readlane_b32 s14, v6, 10
-; CHECK-NEXT: v_readlane_b32 s15, v6, 11
-; CHECK-NEXT: v_readlane_b32 s16, v6, 12
-; CHECK-NEXT: v_readlane_b32 s17, v6, 13
-; CHECK-NEXT: v_readlane_b32 s18, v6, 14
-; CHECK-NEXT: v_readlane_b32 s19, v6, 15
-; CHECK-NEXT: v_readlane_b32 s5, v6, 1
-; CHECK-NEXT: v_readlane_b32 s6, v6, 2
-; CHECK-NEXT: v_readlane_b32 s7, v6, 3
-; CHECK-NEXT: v_readlane_b32 s8, v6, 4
-; CHECK-NEXT: v_readlane_b32 s9, v6, 5
-; CHECK-NEXT: image_sample_lz v0, v[2:3], s[12:19], s[24:27] dmask:0x1
-; CHECK-NEXT: v_mov_b32_e32 v2, 0
-; CHECK-NEXT: v_mov_b32_e32 v3, v2
-; CHECK-NEXT: v_readlane_b32 s10, v6, 6
-; CHECK-NEXT: v_readlane_b32 s11, v6, 7
+; CHECK-NEXT: s_mov_b32 s16, 0
+; CHECK-NEXT: s_mov_b32 s17, s16
+; CHECK-NEXT: v_mov_b32_e32 v0, s16
+; CHECK-NEXT: v_readlane_b32 s44, v7, 16
+; CHECK-NEXT: v_mov_b32_e32 v1, s17
+; CHECK-NEXT: s_mov_b32 s18, s16
+; CHECK-NEXT: s_mov_b32 s19, s16
+; CHECK-NEXT: v_readlane_b32 s45, v7, 17
+; CHECK-NEXT: v_readlane_b32 s46, v7, 18
+; CHECK-NEXT: v_readlane_b32 s47, v7, 19
+; CHECK-NEXT: v_readlane_b32 s48, v7, 20
+; CHECK-NEXT: v_readlane_b32 s49, v7, 21
+; CHECK-NEXT: v_readlane_b32 s50, v7, 22
+; CHECK-NEXT: v_readlane_b32 s51, v7, 23
+; CHECK-NEXT: v_readlane_b32 s52, v7, 24
+; CHECK-NEXT: v_readlane_b32 s53, v7, 25
+; CHECK-NEXT: v_readlane_b32 s54, v7, 26
+; CHECK-NEXT: v_readlane_b32 s55, v7, 27
+; CHECK-NEXT: v_readlane_b32 s56, v7, 28
+; CHECK-NEXT: v_readlane_b32 s57, v7, 29
+; CHECK-NEXT: v_readlane_b32 s58, v7, 30
+; CHECK-NEXT: v_readlane_b32 s59, v7, 31
+; CHECK-NEXT: image_sample_lz v2, v[0:1], s[44:51], s[16:19] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s44, v7, 0
+; CHECK-NEXT: v_readlane_b32 s52, v7, 8
+; CHECK-NEXT: v_readlane_b32 s53, v7, 9
+; CHECK-NEXT: v_readlane_b32 s54, v7, 10
+; CHECK-NEXT: v_readlane_b32 s55, v7, 11
+; CHECK-NEXT: v_readlane_b32 s56, v7, 12
+; CHECK-NEXT: v_readlane_b32 s57, v7, 13
+; CHECK-NEXT: v_readlane_b32 s58, v7, 14
+; CHECK-NEXT: v_readlane_b32 s59, v7, 15
+; CHECK-NEXT: v_mov_b32_e32 v3, 0
+; CHECK-NEXT: v_mov_b32_e32 v4, v3
+; CHECK-NEXT: v_readlane_b32 s45, v7, 1
+; CHECK-NEXT: v_readlane_b32 s46, v7, 2
+; CHECK-NEXT: v_readlane_b32 s47, v7, 3
+; CHECK-NEXT: image_sample_lz v0, v[0:1], s[52:59], s[24:27] dmask:0x1
+; CHECK-NEXT: v_readlane_b32 s48, v7, 4
+; CHECK-NEXT: v_readlane_b32 s49, v7, 5
+; CHECK-NEXT: v_readlane_b32 s50, v7, 6
+; CHECK-NEXT: v_readlane_b32 s51, v7, 7
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: buffer_store_dwordx3 v[1:3], off, s[44:47], 0
+; CHECK-NEXT: buffer_store_dwordx3 v[2:4], off, s[16:19], 0
; CHECK-NEXT: s_waitcnt vmcnt(1)
-; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[44:47], 0
+; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; CHECK-NEXT: ; implicit-def: $vgpr0
; CHECK-NEXT: .LBB0_6: ; %Flow12
-; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[22:23]
+; CHECK-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.7: ; %bb33.preheader
-; CHECK-NEXT: s_mov_b32 s8, 0
-; CHECK-NEXT: s_mov_b32 s12, s8
-; CHECK-NEXT: s_mov_b32 s13, s8
-; CHECK-NEXT: v_mov_b32_e32 v1, s12
-; CHECK-NEXT: s_mov_b32 s9, s8
-; CHECK-NEXT: s_mov_b32 s10, s8
-; CHECK-NEXT: s_mov_b32 s11, s8
-; CHECK-NEXT: v_mov_b32_e32 v2, s13
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: image_sample_lz v3, v[1:2], s[36:43], s[8:11] dmask:0x1
-; CHECK-NEXT: image_sample_lz v4, v[1:2], s[52:59], s[8:11] dmask:0x1
+; CHECK-NEXT: s_mov_b32 s16, 0
+; CHECK-NEXT: s_mov_b32 s20, s16
+; CHECK-NEXT: s_mov_b32 s21, s16
+; CHECK-NEXT: v_mov_b32_e32 v1, s20
+; CHECK-NEXT: s_mov_b32 s17, s16
+; CHECK-NEXT: s_mov_b32 s18, s16
+; CHECK-NEXT: s_mov_b32 s19, s16
+; CHECK-NEXT: v_mov_b32_e32 v2, s21
+; CHECK-NEXT: image_sample_lz v3, v[1:2], s[8:15], s[16:19] dmask:0x1
+; CHECK-NEXT: image_sample_lz v4, v[1:2], s[36:43], s[16:19] dmask:0x1
; CHECK-NEXT: s_and_b64 vcc, exec, 0
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_sub_f32_e32 v1, v4, v3
@@ -171,33 +224,33 @@ define void @main(i1 %arg) #0 {
; CHECK-NEXT: .LBB0_9: ; %Flow13
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: .LBB0_10: ; %UnifiedReturnBlock
-; CHECK-NEXT: s_or_b64 exec, exec, s[20:21]
-; CHECK-NEXT: v_readlane_b32 s71, v5, 21
-; CHECK-NEXT: v_readlane_b32 s70, v5, 20
-; CHECK-NEXT: v_readlane_b32 s69, v5, 19
-; CHECK-NEXT: v_readlane_b32 s68, v5, 18
+; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
+; CHECK-NEXT: v_readlane_b32 s71, v6, 21
+; CHECK-NEXT: v_readlane_b32 s70, v6, 20
+; CHECK-NEXT: v_readlane_b32 s69, v6, 19
+; CHECK-NEXT: v_readlane_b32 s68, v6, 18
+; CHECK-NEXT: v_readlane_b32 s67, v6, 17
+; CHECK-NEXT: v_readlane_b32 s66, v6, 16
+; CHECK-NEXT: v_readlane_b32 s65, v6, 15
+; CHECK-NEXT: v_readlane_b32 s64, v6, 14
+; CHECK-NEXT: v_readlane_b32 s55, v6, 13
+; CHECK-NEXT: v_readlane_b32 s54, v6, 12
+; CHECK-NEXT: v_readlane_b32 s53, v6, 11
+; CHECK-NEXT: v_readlane_b32 s52, v6, 10
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_readlane_b32 s67, v5, 17
-; CHECK-NEXT: v_readlane_b32 s66, v5, 16
-; CHECK-NEXT: v_readlane_b32 s65, v5, 15
-; CHECK-NEXT: v_readlane_b32 s64, v5, 14
-; CHECK-NEXT: v_readlane_b32 s55, v5, 13
-; CHECK-NEXT: v_readlane_b32 s54, v5, 12
-; CHECK-NEXT: v_readlane_b32 s53, v5, 11
-; CHECK-NEXT: v_readlane_b32 s52, v5, 10
-; CHECK-NEXT: v_readlane_b32 s51, v5, 9
-; CHECK-NEXT: v_readlane_b32 s50, v5, 8
-; CHECK-NEXT: v_readlane_b32 s49, v5, 7
-; CHECK-NEXT: v_readlane_b32 s48, v5, 6
-; CHECK-NEXT: v_readlane_b32 s39, v5, 5
-; CHECK-NEXT: v_readlane_b32 s38, v5, 4
-; CHECK-NEXT: v_readlane_b32 s37, v5, 3
-; CHECK-NEXT: v_readlane_b32 s36, v5, 2
-; CHECK-NEXT: v_readlane_b32 s31, v5, 1
-; CHECK-NEXT: v_readlane_b32 s30, v5, 0
+; CHECK-NEXT: v_readlane_b32 s51, v6, 9
+; CHECK-NEXT: v_readlane_b32 s50, v6, 8
+; CHECK-NEXT: v_readlane_b32 s49, v6, 7
+; CHECK-NEXT: v_readlane_b32 s48, v6, 6
+; CHECK-NEXT: v_readlane_b32 s39, v6, 5
+; CHECK-NEXT: v_readlane_b32 s38, v6, 4
+; CHECK-NEXT: v_readlane_b32 s37, v6, 3
+; CHECK-NEXT: v_readlane_b32 s36, v6, 2
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
-; CHECK-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
-; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
+; CHECK-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
index 59dfd71..bd11b07 100644
--- a/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/infer-addrspace-flat-atomic.ll
@@ -11,8 +11,8 @@ define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) {
; CHECK-NEXT: v_mov_b32_e32 v2, s2
; CHECK-NEXT: v_mov_b32_e32 v3, s3
; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3
-; CHECK-NEXT: s_add_u32 s0, s2, s0
-; CHECK-NEXT: s_addc_u32 s1, s3, s1
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v1, s1
; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -8, s0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
@@ -69,13 +69,13 @@ define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, dou
; CHECK-NEXT: s_lshl_b64 s[2:3], s[6:7], 3
; CHECK-NEXT: s_add_u32 s0, s0, s2
; CHECK-NEXT: s_addc_u32 s1, s1, s3
+; CHECK-NEXT: s_add_u32 s0, s0, -8
+; CHECK-NEXT: s_addc_u32 s1, s1, -1
; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
-; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: v_add_co_u32_e64 v0, vcc, -7, s0
-; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
-; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3]
+; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT: flat_atomic_add_f64 v[0:1], v[2:3] offset:1
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: buffer_wbinvl1_vol
; CHECK-NEXT: s_endpgm
@@ -113,7 +113,7 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, doubl
; CHECK-NEXT: s_addc_u32 s1, s1, s5
; CHECK-NEXT: s_add_u32 s4, s0, -8
; CHECK-NEXT: s_addc_u32 s5, s1, -1
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 9
+; CHECK-NEXT: s_cmp_eq_u64 s[4:5], 1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
diff --git a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
index 48bf7fb..3eef616 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-frame-extern.ll
@@ -46,8 +46,8 @@ define void @use_extern_normal() #0 {
; CHECK-NEXT: s_ashr_i32 s5, s15, 31
; CHECK-NEXT: v_mov_b32_e32 v0, 0x4048f5c3
; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; CHECK-NEXT: s_add_u32 s4, s4, s6
-; CHECK-NEXT: s_addc_u32 s5, s5, s7
+; CHECK-NEXT: s_add_u32 s4, s6, s4
+; CHECK-NEXT: s_addc_u32 s5, s7, s5
; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
@@ -70,8 +70,8 @@ define void @use_extern_overalign() #0 {
; CHECK-NEXT: s_ashr_i32 s5, s15, 31
; CHECK-NEXT: v_mov_b32_e32 v0, 0x42280000
; CHECK-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; CHECK-NEXT: s_add_u32 s4, s4, s6
-; CHECK-NEXT: s_addc_u32 s5, s5, s7
+; CHECK-NEXT: s_add_u32 s4, s6, s4
+; CHECK-NEXT: s_addc_u32 s5, s7, s5
; CHECK-NEXT: s_load_dword s4, s[4:5], 0x0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s4
diff --git a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
index ca77482..fa52b96 100644
--- a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
+++ b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -1,19 +1,9 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
# RUN: llc -mtriple=amdgcn -run-pass register-coalescer -o - %s | FileCheck %s
-# Check that coalescer does not create wider register tuple than in source
-
-# CHECK: - { id: 2, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 3, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 4, class: vreg_64, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 5, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 6, class: vreg_96, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 7, class: vreg_128, preferred-register: '', flags: [ ] }
-# CHECK: - { id: 8, class: vreg_128, preferred-register: '', flags: [ ] }
+# Check that the coalescer does not create a wider register tuple than in the
+# source.
# No more registers shall be defined
-# CHECK-NEXT: liveins:
-# CHECK: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4,
-# CHECK: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6,
-
---
name: main
alignment: 1
@@ -52,6 +42,23 @@ body: |
bb.0.entry:
liveins: $sgpr0, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: main
+ ; CHECK: liveins: $sgpr0, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub0:vreg_64 = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY:%[0-9]+]].sub1:vreg_64 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:vreg_64 = COPY [[COPY]].sub1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:vreg_64 = COPY [[COPY]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX2 $vgpr0_vgpr1, [[COPY1]], 0, 0, implicit $exec, implicit $flat_scr
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vreg_96 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub0_sub1:vreg_96 = COPY [[DEF1]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]].sub2:vreg_96 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX3 $vgpr0_vgpr1, [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vreg_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0_sub1_sub2:vreg_128 = COPY [[DEF2]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]].sub3:vreg_128 = COPY [[DEF]].sub0
+ ; CHECK-NEXT: FLAT_STORE_DWORDX4 $vgpr0_vgpr1, [[COPY3]], 0, 0, implicit $exec, implicit $flat_scr
%3 = IMPLICIT_DEF
undef %4.sub0 = COPY $sgpr0
%4.sub1 = COPY %3.sub0
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index bc3d378..3aa3663 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -11,9 +11,9 @@
; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
define void @empty() {
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 65d0102..6e52125 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -232,15 +232,15 @@
; GCN-O1-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O1-NEXT: FunctionPass Manager
; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O1-NEXT: Dominator Tree Construction
+; GCN-O1-NEXT: Natural Loop Information
+; GCN-O1-NEXT: CodeGen Prepare
; GCN-O1-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-NEXT: AMDGPU lower intrinsics
; GCN-O1-NEXT: CallGraph Construction
; GCN-O1-NEXT: Call Graph SCC Pass Manager
; GCN-O1-NEXT: DummyCGSCCPass
; GCN-O1-NEXT: FunctionPass Manager
-; GCN-O1-NEXT: Dominator Tree Construction
-; GCN-O1-NEXT: Natural Loop Information
-; GCN-O1-NEXT: CodeGen Prepare
; GCN-O1-NEXT: Lazy Value Information Analysis
; GCN-O1-NEXT: Lower SwitchInst's to branches
; GCN-O1-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -533,21 +533,21 @@
; GCN-O1-OPTS-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O1-OPTS-NEXT: FunctionPass Manager
; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O1-OPTS-NEXT: Dominator Tree Construction
+; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: CodeGen Prepare
+; GCN-O1-OPTS-NEXT: Dominator Tree Construction
+; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
+; GCN-O1-OPTS-NEXT: Natural Loop Information
+; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
+; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer
; GCN-O1-OPTS-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O1-OPTS-NEXT: AMDGPU lower intrinsics
; GCN-O1-OPTS-NEXT: CallGraph Construction
; GCN-O1-OPTS-NEXT: Call Graph SCC Pass Manager
; GCN-O1-OPTS-NEXT: DummyCGSCCPass
; GCN-O1-OPTS-NEXT: FunctionPass Manager
-; GCN-O1-OPTS-NEXT: Dominator Tree Construction
-; GCN-O1-OPTS-NEXT: Natural Loop Information
-; GCN-O1-OPTS-NEXT: CodeGen Prepare
-; GCN-O1-OPTS-NEXT: Dominator Tree Construction
-; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O1-OPTS-NEXT: Function Alias Analysis Results
-; GCN-O1-OPTS-NEXT: Natural Loop Information
-; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
-; GCN-O1-OPTS-NEXT: GPU Load and Store Vectorizer
; GCN-O1-OPTS-NEXT: Lazy Value Information Analysis
; GCN-O1-OPTS-NEXT: Lower SwitchInst's to branches
; GCN-O1-OPTS-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -852,21 +852,21 @@
; GCN-O2-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O2-NEXT: FunctionPass Manager
; GCN-O2-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O2-NEXT: Dominator Tree Construction
+; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: CodeGen Prepare
+; GCN-O2-NEXT: Dominator Tree Construction
+; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O2-NEXT: Function Alias Analysis Results
+; GCN-O2-NEXT: Natural Loop Information
+; GCN-O2-NEXT: Scalar Evolution Analysis
+; GCN-O2-NEXT: GPU Load and Store Vectorizer
; GCN-O2-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O2-NEXT: AMDGPU lower intrinsics
; GCN-O2-NEXT: CallGraph Construction
; GCN-O2-NEXT: Call Graph SCC Pass Manager
; GCN-O2-NEXT: DummyCGSCCPass
; GCN-O2-NEXT: FunctionPass Manager
-; GCN-O2-NEXT: Dominator Tree Construction
-; GCN-O2-NEXT: Natural Loop Information
-; GCN-O2-NEXT: CodeGen Prepare
-; GCN-O2-NEXT: Dominator Tree Construction
-; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O2-NEXT: Function Alias Analysis Results
-; GCN-O2-NEXT: Natural Loop Information
-; GCN-O2-NEXT: Scalar Evolution Analysis
-; GCN-O2-NEXT: GPU Load and Store Vectorizer
; GCN-O2-NEXT: Lazy Value Information Analysis
; GCN-O2-NEXT: Lower SwitchInst's to branches
; GCN-O2-NEXT: Lower invoke and unwind, for unwindless code generators
@@ -1186,21 +1186,21 @@
; GCN-O3-NEXT: AMDGPU Preload Kernel Arguments
; GCN-O3-NEXT: FunctionPass Manager
; GCN-O3-NEXT: AMDGPU Lower Kernel Arguments
+; GCN-O3-NEXT: Dominator Tree Construction
+; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: CodeGen Prepare
+; GCN-O3-NEXT: Dominator Tree Construction
+; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
+; GCN-O3-NEXT: Function Alias Analysis Results
+; GCN-O3-NEXT: Natural Loop Information
+; GCN-O3-NEXT: Scalar Evolution Analysis
+; GCN-O3-NEXT: GPU Load and Store Vectorizer
; GCN-O3-NEXT: Lower buffer fat pointer operations to buffer resources
; GCN-O3-NEXT: AMDGPU lower intrinsics
; GCN-O3-NEXT: CallGraph Construction
; GCN-O3-NEXT: Call Graph SCC Pass Manager
; GCN-O3-NEXT: DummyCGSCCPass
; GCN-O3-NEXT: FunctionPass Manager
-; GCN-O3-NEXT: Dominator Tree Construction
-; GCN-O3-NEXT: Natural Loop Information
-; GCN-O3-NEXT: CodeGen Prepare
-; GCN-O3-NEXT: Dominator Tree Construction
-; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O3-NEXT: Function Alias Analysis Results
-; GCN-O3-NEXT: Natural Loop Information
-; GCN-O3-NEXT: Scalar Evolution Analysis
-; GCN-O3-NEXT: GPU Load and Store Vectorizer
; GCN-O3-NEXT: Lazy Value Information Analysis
; GCN-O3-NEXT: Lower SwitchInst's to branches
; GCN-O3-NEXT: Lower invoke and unwind, for unwindless code generators
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
index de7d234..b9bf76c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.quadmask.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=1 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -global-isel=0 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG %s
declare i32 @llvm.amdgcn.s.quadmask.i32(i32)
declare i64 @llvm.amdgcn.s.quadmask.i64(i64)
@@ -172,3 +172,91 @@ entry:
%qm = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %mask)
ret i64 %qm
}
+
+;; Ensure that the AND/ICMP pair cannot be fused into a single SCC-setting AND, because the intervening s_quadmask_b32 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_32(i32 %val0, i32 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_32:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_and_b32 s0, s0, 1
+; GFX11-GISEL-NEXT: s_quadmask_b32 s1, s1
+; GFX11-GISEL-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
+; GFX11-GISEL-NEXT: global_store_b32 v2, v3, s[2:3]
+; GFX11-GISEL-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_32:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s0, s0, 1
+; GFX11-SDAG-NEXT: s_quadmask_b32 s1, s1
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s0
+; GFX11-SDAG-NEXT: global_store_b32 v2, v3, s[2:3]
+; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v4, off
+; GFX11-SDAG-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i32 @llvm.amdgcn.s.quadmask.i32(i32 %val1) nounwind readnone
+ store i32 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
+
+;; Ensure that the AND/ICMP pair cannot be fused into a single SCC-setting AND, because the intervening s_quadmask_b64 implicitly defines SCC.
+define amdgpu_kernel void @test_scc_quadmask_64(i32 %val0, i64 %val1, ptr addrspace(1) %ptr) {
+; GFX11-GISEL-LABEL: test_scc_quadmask_64:
+; GFX11-GISEL: ; %bb.0:
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-GISEL-NEXT: s_load_b32 s4, s[4:5], 0x24
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-GISEL-NEXT: s_and_b32 s4, s4, 1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-GISEL-NEXT: s_cmp_eq_u32 s4, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v1, s1
+; GFX11-GISEL-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v5, s0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-GISEL-NEXT: global_store_b64 v4, v[0:1], s[2:3]
+; GFX11-GISEL-NEXT: global_store_b32 v[2:3], v5, off
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX11-SDAG-LABEL: test_scc_quadmask_64:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_clause 0x1
+; GFX11-SDAG-NEXT: s_load_b32 s6, s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v4, 0
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s6, 1
+; GFX11-SDAG-NEXT: s_quadmask_b64 s[0:1], s[0:1]
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s4, 0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-SDAG-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s0
+; GFX11-SDAG-NEXT: global_store_b64 v4, v[2:3], s[2:3]
+; GFX11-SDAG-NEXT: global_store_b32 v[0:1], v5, off
+; GFX11-SDAG-NEXT: s_endpgm
+ %and = and i32 %val0, 1
+ %result = call i64 @llvm.amdgcn.s.quadmask.i64(i64 %val1) nounwind readnone
+ store i64 %result, ptr addrspace(1) %ptr
+ %cmp = icmp eq i32 %and, 0
+ %sel = select i1 %cmp, i32 1, i32 0
+ store i32 %sel, ptr addrspace(1) null, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index ba5ce8b..8bb7274 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -76,13 +76,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_movk_i32 s4, 0xfc01
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s3, 0xfffff
; SI-NEXT: v_mov_b32_e32 v8, 0x3ff00000
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_bfe_u32 v4, v3, 20, 11
-; SI-NEXT: v_add_i32_e32 v6, vcc, s4, v4
+; SI-NEXT: v_add_i32_e32 v6, vcc, 0xfffffc01, v4
; SI-NEXT: v_lshr_b64 v[4:5], s[2:3], v6
; SI-NEXT: v_and_b32_e32 v7, 0x80000000, v3
; SI-NEXT: v_bfi_b32 v5, v5, 0, v3
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index ea9d5e8..1e6b77e 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
@@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
index 0de7f8f..bd29e9e 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-precise-allocate-to-module-struct.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
-; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s
; Regression test for issue 160181
; One variable is chosen to be assigned at zero. Here, that's @both
@@ -22,12 +22,20 @@
;.
; CHECK: @llvm.amdgcn.module.lds = internal addrspace(3) global %llvm.amdgcn.module.lds.t poison, align 4, !absolute_symbol [[META0:![0-9]+]]
; CHECK: @llvm.compiler.used = appending addrspace(1) global [1 x ptr] [ptr addrspacecast (ptr addrspace(3) @llvm.amdgcn.module.lds to ptr)], section "llvm.metadata"
+; CHECK: @llvm.amdgcn.kernel.kern_one.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_one.lds.t poison, align 4, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.kernel.kern_two.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_two.lds.t poison, align 4, !absolute_symbol [[META1]]
+; CHECK: @llvm.amdgcn.kernel.kern_block_direct_allocation.lds = internal addrspace(3) global %llvm.amdgcn.kernel.kern_block_direct_allocation.lds.t poison, align 4, !absolute_symbol [[META1]]
+
;.
define void @func_one() {
; CHECK-LABEL: define {{[^@]+}}@func_one() {
-; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1:![0-9]+]]
-; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18:![0-9]+]]
-; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2:![0-9]+]]
+; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4
+; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[ONE1]], align 4
+; CHECK-NEXT: store i16 10, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11:![0-9]+]]
; CHECK-NEXT: ret void
;
%val0 = load i32, ptr addrspace(3) @both
@@ -38,9 +46,10 @@ define void @func_one() {
define amdgpu_kernel void @kern_one() {
; CHECK-LABEL: define {{[^@]+}}@kern_one
-; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META16:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META24:![0-9]+]]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_one.lds) ]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !noalias [[META17:![0-9]+]]
; CHECK-NEXT: call void @func_one()
; CHECK-NEXT: ret void
;
@@ -51,9 +60,13 @@ entry:
define void @func_two() {
; CHECK-LABEL: define {{[^@]+}}@func_two() {
-; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]]
-; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25:![0-9]+]]
-; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[VAL0:%.*]] = load i32, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]]
+; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4
+; CHECK-NEXT: [[TWO1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: store i32 [[VAL0]], ptr addrspace(3) [[TWO1]], align 4
+; CHECK-NEXT: store i16 20, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]]
; CHECK-NEXT: ret void
;
%val0 = load i32, ptr addrspace(3) @both
@@ -64,9 +77,10 @@ define void @func_two() {
define amdgpu_kernel void @kern_two() {
; CHECK-LABEL: define {{[^@]+}}@kern_two
-; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-SAME: () #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META18:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META26:![0-9]+]], !noalias [[META27:![0-9]+]]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_two.lds) ]
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ], !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
; CHECK-NEXT: call void @func_two()
; CHECK-NEXT: ret void
;
@@ -82,11 +96,18 @@ entry:
; remains the best candidate for address zero allocation.
define void @func_block_direct_allocation() {
; CHECK-LABEL: define {{[^@]+}}@func_block_direct_allocation() {
-; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META18]]
-; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 2), align 4, !noalias [[META25]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT: [[ONE:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[ONE]], align 4
+; CHECK-NEXT: [[ONE1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT: [[VAL1:%.*]] = load i32, ptr addrspace(3) [[ONE1]], align 4
+; CHECK-NEXT: [[TWO:%.*]] = getelementptr inbounds [3 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TWO]], align 4
+; CHECK-NEXT: [[TWO2:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT: [[VAL2:%.*]] = load i32, ptr addrspace(3) [[TWO2]], align 4
; CHECK-NEXT: [[SUM:%.*]] = add i32 [[VAL1]], [[VAL2]]
-; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META1]]
-; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 3), align 4, !noalias [[META23]]
+; CHECK-NEXT: store i32 [[SUM]], ptr addrspace(3) @llvm.amdgcn.module.lds, align 4, !noalias [[META2]]
+; CHECK-NEXT: store i16 30, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_MODULE_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.module.lds, i32 0, i32 1), align 4, !noalias [[META11]]
; CHECK-NEXT: ret void
;
%val1 = load i32, ptr addrspace(3) @one
@@ -99,7 +120,8 @@ define void @func_block_direct_allocation() {
define amdgpu_kernel void @kern_block_direct_allocation() {
; CHECK-LABEL: define {{[^@]+}}@kern_block_direct_allocation
-; CHECK-SAME: () #[[ATTR0]] {
+; CHECK-SAME: () #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META21:![0-9]+]] {
+; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kern_block_direct_allocation.lds) ], !alias.scope [[META22:![0-9]+]], !noalias [[META25:![0-9]+]]
; CHECK-NEXT: call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]
; CHECK-NEXT: call void @func_block_direct_allocation()
; CHECK-NEXT: call void @func_one()
@@ -112,35 +134,8 @@ define amdgpu_kernel void @kern_block_direct_allocation() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="16" }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
-;.
-; CHECK: [[META0]] = !{i32 0, i32 1}
-; CHECK: [[META1]] = !{[[META2:![0-9]+]], [[META4:![0-9]+]], [[META5:![0-9]+]], [[META6:![0-9]+]], [[META8:![0-9]+]], [[META9:![0-9]+]], [[META10:![0-9]+]], [[META12:![0-9]+]], [[META13:![0-9]+]], [[META14:![0-9]+]], [[META16:![0-9]+]], [[META17:![0-9]+]]}
-; CHECK: [[META2]] = distinct !{[[META2]], [[META3:![0-9]+]]}
-; CHECK: [[META3]] = distinct !{[[META3]]}
-; CHECK: [[META4]] = distinct !{[[META4]], [[META3]]}
-; CHECK: [[META5]] = distinct !{[[META5]], [[META3]]}
-; CHECK: [[META6]] = distinct !{[[META6]], [[META7:![0-9]+]]}
-; CHECK: [[META7]] = distinct !{[[META7]]}
-; CHECK: [[META8]] = distinct !{[[META8]], [[META7]]}
-; CHECK: [[META9]] = distinct !{[[META9]], [[META7]]}
-; CHECK: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
-; CHECK: [[META11]] = distinct !{[[META11]]}
-; CHECK: [[META12]] = distinct !{[[META12]], [[META11]]}
-; CHECK: [[META13]] = distinct !{[[META13]], [[META11]]}
-; CHECK: [[META14]] = distinct !{[[META14]], [[META15:![0-9]+]]}
-; CHECK: [[META15]] = distinct !{[[META15]]}
-; CHECK: [[META16]] = distinct !{[[META16]], [[META15]]}
-; CHECK: [[META17]] = distinct !{[[META17]], [[META15]]}
-; CHECK: [[META18]] = !{[[META19:![0-9]+]], [[META2]], [[META5]], [[META20:![0-9]+]], [[META6]], [[META9]], [[META21:![0-9]+]], [[META10]], [[META13]], [[META22:![0-9]+]], [[META14]], [[META17]]}
-; CHECK: [[META19]] = distinct !{[[META19]], [[META3]]}
-; CHECK: [[META20]] = distinct !{[[META20]], [[META7]]}
-; CHECK: [[META21]] = distinct !{[[META21]], [[META11]]}
-; CHECK: [[META22]] = distinct !{[[META22]], [[META15]]}
-; CHECK: [[META23]] = !{[[META19]], [[META4]], [[META5]], [[META20]], [[META8]], [[META9]], [[META21]], [[META12]], [[META13]], [[META22]], [[META16]], [[META17]]}
-; CHECK: [[META24]] = !{[[META10]], [[META12]], [[META13]], [[META14]], [[META16]], [[META17]]}
-; CHECK: [[META25]] = !{[[META19]], [[META2]], [[META4]], [[META20]], [[META6]], [[META8]], [[META21]], [[META10]], [[META12]], [[META22]], [[META14]], [[META16]]}
-; CHECK: [[META26]] = !{[[META22]]}
-; CHECK: [[META27]] = !{[[META14]], [[META16]], [[META17]]}
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="12" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-lds-size"="16" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
index b6f70fa..12212a0 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-hybrid.ll
@@ -84,8 +84,8 @@ define void @f2() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s4
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
index c316f03..b689e1e 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
@@ -49,8 +49,8 @@ define void @f0() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+4
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+12
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
@@ -90,8 +90,8 @@ define void @f1() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+8
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+16
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
@@ -131,8 +131,8 @@ define void @f2() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+12
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+20
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s4
@@ -172,8 +172,8 @@ define void @f3() {
; GCN-NEXT: s_add_u32 s6, s6, llvm.amdgcn.lds.offset.table@rel32@lo+16
; GCN-NEXT: s_addc_u32 s7, s7, llvm.amdgcn.lds.offset.table@rel32@hi+24
; GCN-NEXT: s_lshl_b64 s[4:5], s[4:5], 4
-; GCN-NEXT: s_add_u32 s4, s4, s6
-; GCN-NEXT: s_addc_u32 s5, s5, s7
+; GCN-NEXT: s_add_u32 s4, s6, s4
+; GCN-NEXT: s_addc_u32 s5, s7, s5
; GCN-NEXT: s_load_dword s4, s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s4
diff --git a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
index 65b4d37..93d772f 100644
--- a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
@@ -13,9 +13,9 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
; GFX9-NEXT: s_mul_i32 s14, s14, s4
; GFX9-NEXT: s_add_i32 s5, s5, s14
-; GFX9-NEXT: v_add_u32_e32 v0, s5, v0
-; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX9-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX9-NEXT: v_add_u32_e32 v1, s5, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
@@ -37,12 +37,12 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX10-NEXT: s_load_dword s4, s[8:9], 0x1c
; GFX10-NEXT: s_load_dword s5, s[8:9], 0x38
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_and_b32 s4, s4, 0xffff
; GFX10-NEXT: s_mul_i32 s14, s14, s4
-; GFX10-NEXT: v_add3_u32 v0, s5, s14, v0
-; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX10-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX10-NEXT: v_add3_u32 v2, s5, s14, v0
+; GFX10-NEXT: v_ashrrev_i64 v[4:5], 28, v[1:2]
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo
; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
@@ -62,21 +62,19 @@ define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x1c
; GFX11-NEXT: s_load_b32 s7, s[4:5], 0x38
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, 0x3ff, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s4, s6, 0xffff
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_mul_i32 s13, s13, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_add3_u32 v0, s7, s13, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; GFX11-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX11-NEXT: v_add3_u32 v1, s7, s13, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_ashrrev_i64 v[4:5], 28, v[0:1]
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v5, vcc_lo
; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, s3, v5, vcc_lo
; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index c92c672..ca4f5d2 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -51,7 +51,7 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s4, v2
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
; CHECK-NEXT: s_add_u32 s4, s4, 1
-; CHECK-NEXT: s_addc_u32 s5, s5, 0
+; CHECK-NEXT: s_addc_u32 s5, 0, s5
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[6:7], v10
; CHECK-NEXT: ; %bb.7:
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index dd5c247..14b0729 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -388,8 +388,8 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB2_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -684,8 +684,8 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB4_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v0
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v1, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v1, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v2, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -1411,8 +1411,8 @@ define void @memmove_p3_p0(ptr addrspace(3) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB10_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
@@ -1889,8 +1889,8 @@ define void @memmove_p5_p0(ptr addrspace(5) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s7, s4
; CHECK-NEXT: s_cbranch_execz .LBB15_13
; CHECK-NEXT: ; %bb.11: ; %memmove_bwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v9, s4, v3, v1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v4, v2, s4
+; CHECK-NEXT: v_add_co_u32 v9, s4, v1, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, v2, v4, s4
; CHECK-NEXT: v_add3_u32 v4, v3, v0, -1
; CHECK-NEXT: v_add_co_u32 v9, s4, v9, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v10, null, -1, v10, s4
diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
index 6d0aa1e..7e4be65 100644
--- a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
@@ -9,92 +9,65 @@ define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %v
; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4
; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4
; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base
-; CHECK-NEXT: s_movk_i32 s33, 0x70
-; CHECK-NEXT: s_movk_i32 s34, 0x60
-; CHECK-NEXT: s_or_b32 s44, 0x80, s33
-; CHECK-NEXT: s_mov_b32 s45, s35
-; CHECK-NEXT: s_or_b32 s46, 0x80, s34
-; CHECK-NEXT: s_mov_b32 s47, s35
-; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45
-; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47
; CHECK-NEXT: s_movk_i32 s34, 0x80
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35
+; CHECK-NEXT: v_dual_mov_b32 v20, s34 :: v_dual_mov_b32 v21, s35
; CHECK-NEXT: s_wait_kmcnt 0x0
; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41
; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37
; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39
-; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
-; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21
-; CHECK-NEXT: s_movk_i32 s20, 0x50
; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29
; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_or_b32 s20, 0x80, s20
-; CHECK-NEXT: s_mov_b32 s21, s35
; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
-; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20
+; CHECK-NEXT: v_dual_mov_b32 v16, s20 :: v_dual_mov_b32 v17, s21
+; CHECK-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v19, s23
+; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:112 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:96 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] offset:80 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
-; CHECK-NEXT: s_or_b32 s16, 0x80, 64
-; CHECK-NEXT: s_mov_b32 s17, s35
-; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
-; CHECK-NEXT: s_or_b32 s12, 0x80, 48
-; CHECK-NEXT: s_mov_b32 s13, s35
-; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; CHECK-NEXT: s_or_b32 s8, 0x80, 32
-; CHECK-NEXT: s_mov_b32 s9, s35
-; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
-; CHECK-NEXT: s_or_b32 s4, 0x80, 16
-; CHECK-NEXT: s_mov_b32 s5, s35
; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16
+; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15
-; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12
-; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8
-; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4
+; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7
; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1
; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3
-; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[0:3] offset:64 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[4:7] offset:48 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] offset:32 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[12:15] offset:16 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS
+; CHECK-NEXT: flat_store_b128 v[20:21], v[16:19] scope:SCOPE_SYS
; CHECK-NEXT: s_wait_storecnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:96 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:112 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:64 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:80 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:32 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:48 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] offset:16 scope:SCOPE_SYS
; CHECK-NEXT: s_wait_loadcnt 0x0
; CHECK-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
index f1f2eb6..c9645c3 100644
--- a/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
+++ b/llvm/test/CodeGen/AMDGPU/peephole-opt-regseq-removal.mir
@@ -80,3 +80,151 @@ body: |
%4:vreg_128 = REG_SEQUENCE %3.sub0, %subreg.sub0, %3.sub1, %subreg.sub1, %3.sub2, %subreg.sub2, %3.sub3, %subreg.sub3
KILL implicit %4
...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_extra_copy
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY3]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:vreg_64 = COPY %2
+ %4:vgpr_32 = COPY %3.sub0
+ $vgpr0 = COPY %4
+...
+
+---
+name: copy_av_64_subreg_from_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GCN-LABEL: name: copy_av_64_subreg_from_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:av_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY3]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64_align2 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %3:av_64_align2 = COPY %2
+ %4:vgpr_32 = COPY %3.sub0
+ $vgpr0 = COPY %4
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub0_compose
+ ; GCN: liveins: $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub0, %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vreg_64 = COPY $vgpr0_vgpr1
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0.sub0, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_vgpr_reg_sequence_with_sub1_compose
+ ; GCN: liveins: $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]].sub1, %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
+ ; GCN-NEXT: $vgpr0 = COPY [[COPY2]]
+ %0:vreg_64 = COPY $vgpr0_vgpr1
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vreg_64 = REG_SEQUENCE %0.sub1, %subreg.sub0, %1, %subreg.sub1
+ %3:vgpr_32 = COPY %2.sub0
+ $vgpr0 = COPY %3
+...
+
+---
+name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-LABEL: name: copy_vreg_64_subreg_from_multiple_vgpr_reg_sequence
+ ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GCN-NEXT: [[COPY4:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GCN-NEXT: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub1_sub2
+ ; GCN-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+ ; GCN-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+ ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[COPY4]]
+ ; GCN-NEXT: $vgpr2_vgpr3 = COPY [[COPY5]]
+ ; GCN-NEXT: $vgpr4_vgpr5 = COPY [[COPY6]]
+ ; GCN-NEXT: $vgpr6 = COPY [[COPY7]]
+ ; GCN-NEXT: $vgpr6 = COPY [[COPY8]]
+ %0:vgpr_32 = COPY $vgpr0
+ %1:vgpr_32 = COPY $vgpr1
+ %2:vgpr_32 = COPY $vgpr2
+ %3:vgpr_32 = COPY $vgpr3
+ %4:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
+ %5:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1
+ %6:vreg_128 = REG_SEQUENCE %4, %subreg.sub0_sub1, %5, %subreg.sub2_sub3
+ %7:vreg_64 = COPY %6.sub0_sub1
+ %8:vreg_64 = COPY %6.sub1_sub2
+ %9:vreg_64 = COPY %6.sub2_sub3
+ %10:vgpr_32 = COPY %6.sub2
+ %11:vgpr_32 = COPY %6.sub0
+ $vgpr0_vgpr1 = COPY %7
+ $vgpr2_vgpr3 = COPY %8
+ $vgpr4_vgpr5 = COPY %9
+ $vgpr6 = COPY %10
+ $vgpr6 = COPY %11
+...
diff --git a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
index f5e136a..b717f85 100644
--- a/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll
@@ -337,8 +337,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out)
; GFX942-NEXT: .p2align 8
; GFX942-NEXT: ; %bb.2:
; GFX942-NEXT: .LBB8_0:
-; GFX942-NEXT: s_mov_b32 s4, 8
-; GFX942-NEXT: s_load_dword s0, s[0:1], s4 offset:0x2
+; GFX942-NEXT: s_load_dword s0, s[0:1], 0xa
; GFX942-NEXT: v_mov_b32_e32 v0, 0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v1, s0
@@ -353,8 +352,7 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out)
; GFX90a-NEXT: .p2align 8
; GFX90a-NEXT: ; %bb.2:
; GFX90a-NEXT: .LBB8_0:
-; GFX90a-NEXT: s_mov_b32 s0, 8
-; GFX90a-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2
+; GFX90a-NEXT: s_load_dword s0, s[4:5], 0xa
; GFX90a-NEXT: v_mov_b32_e32 v0, 0
; GFX90a-NEXT: s_waitcnt lgkmcnt(0)
; GFX90a-NEXT: v_mov_b32_e32 v1, s0
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index aa131ed..85a9aba 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -495,8 +495,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX900-NEXT: v_mov_b32_e32 v1, s35
; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s34, v0
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX900-NEXT: s_movk_i32 s0, 0x5000
-; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, 0x5000, v0
; GFX900-NEXT: v_mov_b32_e32 v4, 0
; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX900-NEXT: v_mov_b32_e32 v5, 0
@@ -609,8 +608,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX10-NEXT: v_mov_b32_e32 v7, 0x7f
; GFX10-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX10-NEXT: v_lshl_or_b32 v0, v0, 3, v6
-; GFX10-NEXT: v_add_co_u32 v0, s0, v0, s34
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, 0, s35, s0
+; GFX10-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
; GFX10-NEXT: .LBB1_1: ; %for.cond.preheader
@@ -718,8 +717,7 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX90A-NEXT: v_mov_b32_e32 v2, s35
; GFX90A-NEXT: v_add_co_u32_e32 v1, vcc, s34, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v2, vcc
-; GFX90A-NEXT: s_movk_i32 s0, 0x5000
-; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, s0, v1
+; GFX90A-NEXT: v_add_co_u32_e32 v2, vcc, 0x5000, v1
; GFX90A-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX90A-NEXT: v_pk_mov_b32 v[4:5], 0, 0
; GFX90A-NEXT: v_mov_b32_e32 v1, 0x7f
@@ -821,8 +819,8 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1) %buffer) {
; GFX11-NEXT: v_and_b32_e32 v6, 0xfe000000, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 3, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s0, v0, s34
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, s35, s0
+; GFX11-NEXT: v_add_co_u32 v0, s0, s34, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s35, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x5000, v0
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
index ff90f1f..40f39a2 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-mubuf.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX6,GFX6_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX6,GFX6_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tahiti < %s | FileCheck --check-prefixes=GFX6 %s
; Test PTRADD handling in AMDGPUDAGToDAGISel::SelectMUBUF.
@@ -34,7 +33,3 @@ define amdgpu_kernel void @v_add_i32(ptr addrspace(1) %out, ptr addrspace(1) %in
store i32 %result, ptr addrspace(1) %out
ret void
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX6_LEGACY: {{.*}}
-; GFX6_PTRADD: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index 7d3b19e..1c986a0 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -disable-separate-const-offset-from-gep=1 < %s | FileCheck --check-prefixes=GFX942 %s
; Tests for DAG combines and folds related to the ISD::PTRADD SelectionDAG
; opcode. The RUN lines use -disable-separate-const-offset-from-gep to disable
@@ -24,21 +23,13 @@ define i64 @global_load_ZTwoUses(ptr addrspace(1) %base, i64 %voffset) {
}
define i64 @global_load_gep_add_reassoc(ptr addrspace(1) %base, i64 %voffset) {
-; GFX942_PTRADD-LABEL: global_load_gep_add_reassoc:
-; GFX942_PTRADD: ; %bb.0:
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
-; GFX942_PTRADD-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0)
-; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX942_LEGACY-LABEL: global_load_gep_add_reassoc:
-; GFX942_LEGACY: ; %bb.0:
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[2:3], 0, v[0:1]
-; GFX942_LEGACY-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0)
-; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31]
+; GFX942-LABEL: global_load_gep_add_reassoc:
+; GFX942: ; %bb.0:
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, v[2:3]
+; GFX942-NEXT: global_load_dwordx2 v[0:1], v[0:1], off offset:24
+; GFX942-NEXT: s_waitcnt vmcnt(0)
+; GFX942-NEXT: s_setpc_b64 s[30:31]
%add0 = add nuw nsw i64 %voffset, 24
%gep0 = getelementptr nuw inbounds i8, ptr addrspace(1) %base, i64 %add0
%l = load i64, ptr addrspace(1) %gep0, align 8
@@ -221,23 +212,14 @@ define ptr addrspace(1) @shl_neg_offset(ptr addrspace(1) %p, i64 %noffset, i64 %
; Check that offsets are folded into global addresses if possible. For example,
; this is relevant when using --amdgpu-lower-module-lds-strategy=table.
define ptr addrspace(1) @complextype_global_gep(i64 %offset) {
-; GFX942_PTRADD-LABEL: complextype_global_gep:
-; GFX942_PTRADD: ; %bb.0:
-; GFX942_PTRADD-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_PTRADD-NEXT: s_getpc_b64 s[0:1]
-; GFX942_PTRADD-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
-; GFX942_PTRADD-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
-; GFX942_PTRADD-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
-; GFX942_PTRADD-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX942_LEGACY-LABEL: complextype_global_gep:
-; GFX942_LEGACY: ; %bb.0:
-; GFX942_LEGACY-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942_LEGACY-NEXT: s_getpc_b64 s[0:1]
-; GFX942_LEGACY-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
-; GFX942_LEGACY-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
-; GFX942_LEGACY-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[0:1]
-; GFX942_LEGACY-NEXT: s_setpc_b64 s[30:31]
+; GFX942-LABEL: complextype_global_gep:
+; GFX942: ; %bb.0:
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, v0@rel32@lo+14
+; GFX942-NEXT: s_addc_u32 s1, s1, v0@rel32@hi+22
+; GFX942-NEXT: v_lshl_add_u64 v[0:1], s[0:1], 0, v[0:1]
+; GFX942-NEXT: s_setpc_b64 s[30:31]
%gep0 = getelementptr inbounds %complextype, ptr addrspace(1) @v0, i64 0, i32 1, i64 %offset
%gep1 = getelementptr inbounds i8, ptr addrspace(1) %gep0, i64 2
ret ptr addrspace(1) %gep1
@@ -430,36 +412,20 @@ define ptr @gep_disjoint_or(ptr %base) {
; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
; taken from preload-implicit-kernargs.ll
define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) {
-; GFX942_PTRADD-LABEL: random_incorrect_offset:
-; GFX942_PTRADD: ; %bb.1:
-; GFX942_PTRADD-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_PTRADD-NEXT: s_branch .LBB21_0
-; GFX942_PTRADD-NEXT: .p2align 8
-; GFX942_PTRADD-NEXT: ; %bb.2:
-; GFX942_PTRADD-NEXT: .LBB21_0:
-; GFX942_PTRADD-NEXT: s_load_dword s0, s[4:5], 0xa
-; GFX942_PTRADD-NEXT: v_mov_b32_e32 v0, 0
-; GFX942_PTRADD-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_PTRADD-NEXT: v_mov_b32_e32 v1, s0
-; GFX942_PTRADD-NEXT: global_store_dword v0, v1, s[8:9]
-; GFX942_PTRADD-NEXT: s_endpgm
-;
-; GFX942_LEGACY-LABEL: random_incorrect_offset:
-; GFX942_LEGACY: ; %bb.1:
-; GFX942_LEGACY-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
-; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_LEGACY-NEXT: s_branch .LBB21_0
-; GFX942_LEGACY-NEXT: .p2align 8
-; GFX942_LEGACY-NEXT: ; %bb.2:
-; GFX942_LEGACY-NEXT: .LBB21_0:
-; GFX942_LEGACY-NEXT: s_mov_b32 s0, 8
-; GFX942_LEGACY-NEXT: s_load_dword s0, s[4:5], s0 offset:0x2
-; GFX942_LEGACY-NEXT: v_mov_b32_e32 v0, 0
-; GFX942_LEGACY-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942_LEGACY-NEXT: v_mov_b32_e32 v1, s0
-; GFX942_LEGACY-NEXT: global_store_dword v0, v1, s[8:9]
-; GFX942_LEGACY-NEXT: s_endpgm
+; GFX942-LABEL: random_incorrect_offset:
+; GFX942: ; %bb.1:
+; GFX942-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_branch .LBB21_0
+; GFX942-NEXT: .p2align 8
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: .LBB21_0:
+; GFX942-NEXT: s_load_dword s0, s[4:5], 0xa
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v1, s0
+; GFX942-NEXT: global_store_dword v0, v1, s[8:9]
+; GFX942-NEXT: s_endpgm
%imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
%gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
%load = load i32, ptr addrspace(4) %gep
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
index 1934ce3..e7c715f 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-undef-poison.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=1 < %s | FileCheck --check-prefixes=GFX942,GFX942_PTRADD %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel -amdgpu-use-sdag-ptradd=0 < %s | FileCheck --check-prefixes=GFX942,GFX942_LEGACY %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -start-before=amdgpu-isel < %s | FileCheck --check-prefixes=GFX942 %s
; Tests for undef and poison DAG folds for the ISD::PTRADD SelectionDAG opcode.
; If any add instructions are generated for these tests, the folds don't work.
@@ -44,6 +43,3 @@ define ptr @undef_base(ptr %p, i64 %offset) {
%gep1 = getelementptr i8, ptr undef, i64 %offset
ret ptr %gep1
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX942_LEGACY: {{.*}}
-; GFX942_PTRADD: {{.*}}
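; The fold exercised above: a PTRADD whose base is poison (or undef) collapses
; away, so no add instructions reach the output. A minimal sketch of the same
; property at the IR level (hypothetical, not part of this test; poison
; propagates through getelementptr):
;
;   define ptr @sketch_poison_base(i64 %offset) {
;     %gep = getelementptr i8, ptr poison, i64 %offset
;     ret ptr %gep   ; folds to poison; no s_add/v_add should be emitted
;   }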
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
index 9dd2502..f4f5a78 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag.ll
@@ -1,14 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX8,GFX8_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX942,GFX942_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX10,GFX10_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX11,GFX11_LEGACY
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=1 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_PTRADD
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-use-sdag-ptradd=0 < %s | FileCheck %s -check-prefixes=GFX12,GFX12_LEGACY
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji < %s | FileCheck %s -check-prefixes=GFX8
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck %s -check-prefixes=GFX10
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck %s -check-prefixes=GFX11
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 < %s | FileCheck %s -check-prefixes=GFX12
; Tests for the ISD::PTRADD SelectionDAG opcode. This only tests 64-bit address
; spaces since PTRADD is currently only used for these.
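; For reference, a 64-bit address space here means e.g. global (addrspace(1))
; or flat (addrspace(0)) pointers; 32-bit spaces such as LDS (addrspace(3))
; do not go through PTRADD. A hypothetical pair illustrating the distinction
; (not part of this test):
;
;   define ptr addrspace(1) @sketch_global(ptr addrspace(1) %p, i64 %o) {
;     %q = getelementptr i8, ptr addrspace(1) %p, i64 %o  ; 64-bit, via PTRADD
;     ret ptr addrspace(1) %q
;   }
;   define ptr addrspace(3) @sketch_lds(ptr addrspace(3) %p, i32 %o) {
;     %q = getelementptr i8, ptr addrspace(3) %p, i32 %o  ; 32-bit, no PTRADD
;     ret ptr addrspace(3) %q
;   }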
@@ -511,15 +506,3 @@ entry:
store i32 %val, ptr addrspace(1) %gep.to, align 4
ret void
}
-
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10_LEGACY: {{.*}}
-; GFX10_PTRADD: {{.*}}
-; GFX11_LEGACY: {{.*}}
-; GFX11_PTRADD: {{.*}}
-; GFX12_LEGACY: {{.*}}
-; GFX12_PTRADD: {{.*}}
-; GFX8_LEGACY: {{.*}}
-; GFX8_PTRADD: {{.*}}
-; GFX942_LEGACY: {{.*}}
-; GFX942_PTRADD: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
index 5d5aad7..566eb1e 100644
--- a/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
+++ b/llvm/test/CodeGen/AMDGPU/resource-usage-dead-function.ll
@@ -7,16 +7,12 @@
@gv.fptr0 = external hidden unnamed_addr addrspace(4) constant ptr, align 4
-; GCN-LABEL: unreachable:
-; Function info:
-; codeLenInByte = 4
define internal fastcc void @unreachable() {
%fptr = load ptr, ptr addrspace(4) @gv.fptr0
call void %fptr()
unreachable
}
-
; GCN-LABEL: entry:
; GCN-NOT: s_swappc_b64
; GCN: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
index 65a99d0..480eb0d 100644
--- a/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-weird-sizes.ll
@@ -52,11 +52,12 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 {
; HAWAII-LABEL: local_store_i55:
; HAWAII: ; %bb.0:
; HAWAII-NEXT: s_add_i32 s12, s12, s17
-; HAWAII-NEXT: s_or_b32 s0, s8, 14
-; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13
; HAWAII-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; HAWAII-NEXT: s_add_u32 s0, s8, 14
+; HAWAII-NEXT: s_addc_u32 s1, s9, 0
; HAWAII-NEXT: v_mov_b32_e32 v0, s0
-; HAWAII-NEXT: v_mov_b32_e32 v1, s9
+; HAWAII-NEXT: s_mov_b32 flat_scratch_lo, s13
+; HAWAII-NEXT: v_mov_b32_e32 v1, s1
; HAWAII-NEXT: flat_load_ubyte v0, v[0:1]
; HAWAII-NEXT: s_load_dword s2, s[8:9], 0x0
; HAWAII-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x2
@@ -74,25 +75,27 @@ define amdgpu_kernel void @local_store_i55(ptr addrspace(3) %ptr, i55 %arg) #0 {
;
; FIJI-LABEL: local_store_i55:
; FIJI: ; %bb.0:
+; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
; FIJI-NEXT: s_add_i32 s12, s12, s17
-; FIJI-NEXT: s_or_b32 s0, s8, 14
-; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13
; FIJI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; FIJI-NEXT: v_mov_b32_e32 v0, s0
-; FIJI-NEXT: v_mov_b32_e32 v1, s9
-; FIJI-NEXT: flat_load_ubyte v0, v[0:1]
-; FIJI-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
-; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0
+; FIJI-NEXT: s_mov_b32 flat_scratch_lo, s13
; FIJI-NEXT: s_mov_b32 m0, -1
; FIJI-NEXT: s_waitcnt lgkmcnt(0)
-; FIJI-NEXT: s_and_b32 s3, s1, 0xffff
-; FIJI-NEXT: v_mov_b32_e32 v1, s2
+; FIJI-NEXT: s_and_b32 s4, s1, 0xffff
+; FIJI-NEXT: s_add_u32 s2, s8, 14
+; FIJI-NEXT: s_addc_u32 s3, s9, 0
+; FIJI-NEXT: v_mov_b32_e32 v0, s2
+; FIJI-NEXT: v_mov_b32_e32 v1, s3
+; FIJI-NEXT: flat_load_ubyte v0, v[0:1]
+; FIJI-NEXT: s_load_dword s2, s[8:9], 0x0
; FIJI-NEXT: v_mov_b32_e32 v2, s1
; FIJI-NEXT: v_mov_b32_e32 v3, s0
+; FIJI-NEXT: s_waitcnt lgkmcnt(0)
+; FIJI-NEXT: v_mov_b32_e32 v1, s2
; FIJI-NEXT: ds_write_b16 v1, v2 offset:4
; FIJI-NEXT: s_waitcnt vmcnt(0)
; FIJI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; FIJI-NEXT: v_or_b32_e32 v0, s3, v0
+; FIJI-NEXT: v_or_b32_e32 v0, s4, v0
; FIJI-NEXT: v_bfe_u32 v0, v0, 16, 7
; FIJI-NEXT: ds_write_b8 v1, v0 offset:6
; FIJI-NEXT: ds_write_b32 v1, v3
diff --git a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
index 7b1331f..f44ad2a 100644
--- a/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
+++ b/llvm/test/CodeGen/ARM/inline-asm-clobber.ll
@@ -6,12 +6,19 @@
; RUN: llc <%s -mtriple=arm-none-eabi --frame-pointer=all 2>&1 \
; RUN: | FileCheck %s -check-prefix=NO_FP_ELIM
+; RUN: llc <%s -mtriple=armv6-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2
+; RUN: llc <%s -mtriple=armv6k-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS2
+; RUN: llc <%s -mtriple=armv6k-apple-ios3 2>&1 | FileCheck %s -check-prefix=IOS3
+; RUN: llc <%s -mtriple=armv7-apple-ios2 2>&1 | FileCheck %s -check-prefix=IOS3
+
; CHECK: warning: inline asm clobber list contains reserved registers: SP, PC
; CHECK: warning: inline asm clobber list contains reserved registers: R11
; RWPI: warning: inline asm clobber list contains reserved registers: R9, SP, PC
; RWPI: warning: inline asm clobber list contains reserved registers: R11
; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11, SP, PC
; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11
+; IOS2: warning: inline asm clobber list contains reserved registers: R9, SP, PC
+; IOS3: warning: inline asm clobber list contains reserved registers: SP, PC
define void @foo() nounwind {
call void asm sideeffect "mov r7, #1",
diff --git a/llvm/test/CodeGen/ARM/llrint-conv.ll b/llvm/test/CodeGen/ARM/llrint-conv.ll
index 749ee00..a1a04db 100644
--- a/llvm/test/CodeGen/ARM/llrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/llrint-conv.ll
@@ -1,46 +1,71 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
-; SOFTFP-LABEL: testmsxh_builtin:
-; SOFTFP: bl llrintf
-; HARDFP-LABEL: testmsxh_builtin:
-; HARDFP: bl llrintf
define i64 @testmsxh_builtin(half %x) {
+; CHECK-SOFT-LABEL: testmsxh_builtin:
+; CHECK-SOFT: @ %bb.0: @ %entry
+; CHECK-SOFT-NEXT: .save {r11, lr}
+; CHECK-SOFT-NEXT: push {r11, lr}
+; CHECK-SOFT-NEXT: bl __aeabi_h2f
+; CHECK-SOFT-NEXT: bl llrintf
+; CHECK-SOFT-NEXT: pop {r11, pc}
+;
+; CHECK-NOFP16-LABEL: testmsxh_builtin:
+; CHECK-NOFP16: @ %bb.0: @ %entry
+; CHECK-NOFP16-NEXT: .save {r11, lr}
+; CHECK-NOFP16-NEXT: push {r11, lr}
+; CHECK-NOFP16-NEXT: vmov r0, s0
+; CHECK-NOFP16-NEXT: bl __aeabi_h2f
+; CHECK-NOFP16-NEXT: vmov s0, r0
+; CHECK-NOFP16-NEXT: bl llrintf
+; CHECK-NOFP16-NEXT: pop {r11, pc}
+;
+; CHECK-FP16-LABEL: testmsxh_builtin:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: .save {r11, lr}
+; CHECK-FP16-NEXT: push {r11, lr}
+; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FP16-NEXT: bl llrintf
+; CHECK-FP16-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f16(half %x)
ret i64 %0
}
-; SOFTFP-LABEL: testmsxs_builtin:
-; SOFTFP: bl llrintf
-; HARDFP-LABEL: testmsxs_builtin:
-; HARDFP: bl llrintf
define i64 @testmsxs_builtin(float %x) {
+; CHECK-LABEL: testmsxs_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrintf
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f32(float %x)
ret i64 %0
}
-; SOFTFP-LABEL: testmsxd_builtin:
-; SOFTFP: bl llrint
-; HARDFP-LABEL: testmsxd_builtin:
-; HARDFP: bl llrint
define i64 @testmsxd_builtin(double %x) {
+; CHECK-LABEL: testmsxd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrint
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f64(double %x)
ret i64 %0
}
-; FIXME(#44744): incorrect libcall
-; SOFTFP-LABEL: testmsxq_builtin:
-; SOFTFP: bl llrintl
-; HARDFP-LABEL: testmsxq_builtin:
-; HARDFP: bl llrintl
define i64 @testmsxq_builtin(fp128 %x) {
+; CHECK-LABEL: testmsxq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl llrintl
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i64 @llvm.llrint.i64.f128(fp128 %x)
ret i64 %0
}
-
-declare i64 @llvm.llrint.i64.f32(float) nounwind readnone
-declare i64 @llvm.llrint.i64.f64(double) nounwind readnone
diff --git a/llvm/test/CodeGen/ARM/llvm.exp10.ll b/llvm/test/CodeGen/ARM/llvm.exp10.ll
index eb72fe8..49397ca 100644
--- a/llvm/test/CodeGen/ARM/llvm.exp10.ll
+++ b/llvm/test/CodeGen/ARM/llvm.exp10.ll
@@ -189,12 +189,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; CHECK-NEXT: mov r6, r0
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl exp10f
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl exp10f
; CHECK-NEXT: vmov s16, r0
+; CHECK-NEXT: mov r1, r4
; CHECK-NEXT: vmov s18, r6
-; CHECK-NEXT: vmov r0, r1, d8
; CHECK-NEXT: vmov r2, r3, d9
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r6, pc}
@@ -207,7 +208,6 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; CHECK: @ %bb.0:
; CHECK-NEXT: push {r4, r5, r6, r7, lr}
; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: mov r6, r0
; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: mov r4, r3
@@ -216,17 +216,15 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; CHECK-NEXT: mov r7, r0
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s19, r0
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s18, r0
+; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: mov r0, r6
-; CHECK-NEXT: vmov s17, r7
; CHECK-NEXT: bl exp10f
-; CHECK-NEXT: vmov s16, r0
-; CHECK-NEXT: vmov r2, r3, d9
-; CHECK-NEXT: vmov r0, r1, d8
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: mov r1, r7
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: mov r3, r4
; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
%r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
diff --git a/llvm/test/CodeGen/ARM/llvm.frexp.ll b/llvm/test/CodeGen/ARM/llvm.frexp.ll
index 376426d..80972b75 100644
--- a/llvm/test/CodeGen/ARM/llvm.frexp.ll
+++ b/llvm/test/CodeGen/ARM/llvm.frexp.ll
@@ -362,33 +362,31 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) {
define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) {
; CHECK-LABEL: test_frexp_v4f32_v4i32_only_use_fract:
; CHECK: @ %bb.0:
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: mov r5, r1
-; CHECK-NEXT: mov r6, r0
-; CHECK-NEXT: mov r1, sp
-; CHECK-NEXT: mov r0, r3
-; CHECK-NEXT: mov r4, r2
-; CHECK-NEXT: bl frexpf
+; CHECK-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-NEXT: sub sp, #20
+; CHECK-NEXT: mov r6, r1
; CHECK-NEXT: add r1, sp, #4
-; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r7, r0
+; CHECK-NEXT: mov r0, r3
+; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: bl frexpf
; CHECK-NEXT: add r1, sp, #8
-; CHECK-NEXT: vmov s18, r0
+; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl frexpf
; CHECK-NEXT: add r1, sp, #12
-; CHECK-NEXT: vmov s17, r0
+; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: mov r0, r6
; CHECK-NEXT: bl frexpf
-; CHECK-NEXT: vmov s16, r0
-; CHECK-NEXT: vmov r2, r3, d9
-; CHECK-NEXT: vmov r0, r1, d8
-; CHECK-NEXT: add sp, #16
-; CHECK-NEXT: vpop {d8, d9}
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; CHECK-NEXT: add r1, sp, #16
+; CHECK-NEXT: mov r6, r0
+; CHECK-NEXT: mov r0, r7
+; CHECK-NEXT: bl frexpf
+; CHECK-NEXT: mov r1, r6
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: mov r3, r4
+; CHECK-NEXT: add sp, #20
+; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
%result = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> %a)
%result.0 = extractvalue { <4 x float>, <4 x i32> } %result, 0
ret <4 x float> %result.0
diff --git a/llvm/test/CodeGen/ARM/lrint-conv.ll b/llvm/test/CodeGen/ARM/lrint-conv.ll
index 9aa9511..23a2685 100644
--- a/llvm/test/CodeGen/ARM/lrint-conv.ll
+++ b/llvm/test/CodeGen/ARM/lrint-conv.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft | FileCheck %s --check-prefix=SOFTFP
-; RUN: llc < %s -mtriple=arm-eabi -float-abi=hard | FileCheck %s --check-prefix=HARDFP
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
; FIXME: crash
; define i32 @testmswh_builtin(half %x) {
@@ -8,36 +10,37 @@
; ret i32 %0
; }
-; SOFTFP-LABEL: testmsws_builtin:
-; SOFTFP: bl lrintf
-; HARDFP-LABEL: testmsws_builtin:
-; HARDFP: bl lrintf
define i32 @testmsws_builtin(float %x) {
+; CHECK-LABEL: testmsws_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lrintf
entry:
%0 = tail call i32 @llvm.lrint.i32.f32(float %x)
ret i32 %0
}
-; SOFTFP-LABEL: testmswd_builtin:
-; SOFTFP: bl lrint
-; HARDFP-LABEL: testmswd_builtin:
-; HARDFP: bl lrint
define i32 @testmswd_builtin(double %x) {
+; CHECK-LABEL: testmswd_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: b lrint
entry:
%0 = tail call i32 @llvm.lrint.i32.f64(double %x)
ret i32 %0
}
-; FIXME(#44744): incorrect libcall
-; SOFTFP-LABEL: testmswq_builtin:
-; SOFTFP: bl lrintl
-; HARDFP-LABEL: testmswq_builtin:
-; HARDFP: bl lrintl
define i32 @testmswq_builtin(fp128 %x) {
+; CHECK-LABEL: testmswq_builtin:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl lrintl
+; CHECK-NEXT: pop {r11, pc}
entry:
%0 = tail call i32 @llvm.lrint.i32.f128(fp128 %x)
ret i32 %0
}
-declare i32 @llvm.lrint.i32.f32(float) nounwind readnone
-declare i32 @llvm.lrint.i32.f64(double) nounwind readnone
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-FP16: {{.*}}
+; CHECK-NOFP16: {{.*}}
+; CHECK-SOFT: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/vector-lrint.ll b/llvm/test/CodeGen/ARM/vector-lrint.ll
index fe5e3cb..c1159da 100644
--- a/llvm/test/CodeGen/ARM/vector-lrint.ll
+++ b/llvm/test/CodeGen/ARM/vector-lrint.ll
@@ -14,31 +14,26 @@
; %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half> %x)
; ret <1 x iXLen> %a
; }
-; declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f16(<1 x half>)
; define <2 x iXLen> @lrint_v2f16(<2 x half> %x) {
; %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half> %x)
; ret <2 x iXLen> %a
; }
-; declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f16(<2 x half>)
; define <4 x iXLen> @lrint_v4f16(<4 x half> %x) {
; %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half> %x)
; ret <4 x iXLen> %a
; }
-; declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f16(<4 x half>)
; define <8 x iXLen> @lrint_v8f16(<8 x half> %x) {
; %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half> %x)
; ret <8 x iXLen> %a
; }
-; declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f16(<8 x half>)
; define <16 x iXLen> @lrint_v16f16(<16 x half> %x) {
; %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half> %x)
; ret <16 x iXLen> %a
; }
-; declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f16(<16 x half>)
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
; LE-I32-LABEL: lrint_v1f32:
@@ -76,7 +71,6 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; LE-I32-LABEL: lrint_v2f32:
@@ -160,7 +154,6 @@ define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; LE-I32-LABEL: lrint_v4f32:
@@ -274,7 +267,6 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)
define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; LE-I32-LABEL: lrint_v8f32:
@@ -488,7 +480,6 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)
define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
; LE-I32-LABEL: lrint_v16f32:
@@ -1005,7 +996,6 @@ define <16 x iXLen> @lrint_v16f32(<16 x float> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
; LE-I32-LABEL: lrint_v1f64:
@@ -1043,7 +1033,6 @@ define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>)
define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
; LE-I32-LABEL: lrint_v2f64:
@@ -1120,7 +1109,6 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>)
define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; LE-I32-LABEL: lrint_v4f64:
@@ -1237,7 +1225,6 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>)
define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; LE-I32-LABEL: lrint_v8f64:
@@ -1467,7 +1454,6 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>)
define <16 x iXLen> @lrint_v16f64(<16 x double> %x) {
; LE-I32-LABEL: lrint_v16f64:
@@ -2053,7 +2039,6 @@ define <16 x iXLen> @lrint_v16f64(<16 x double> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f64(<16 x double>)
define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) {
; LE-I32-LABEL: lrint_v1fp128:
@@ -2091,7 +2076,6 @@ define <1 x iXLen> @lrint_v1fp128(<1 x fp128> %x) {
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128> %x)
ret <1 x iXLen> %a
}
-declare <1 x iXLen> @llvm.lrint.v1iXLen.v1fp128(<1 x fp128>)
define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) {
; LE-I32-LABEL: lrint_v2fp128:
@@ -2194,7 +2178,6 @@ define <2 x iXLen> @lrint_v2fp128(<2 x fp128> %x) {
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128> %x)
ret <2 x iXLen> %a
}
-declare <2 x iXLen> @llvm.lrint.v2iXLen.v2fp128(<2 x fp128>)
define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) {
; LE-I32-LABEL: lrint_v4fp128:
@@ -2347,7 +2330,6 @@ define <4 x iXLen> @lrint_v4fp128(<4 x fp128> %x) {
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128> %x)
ret <4 x iXLen> %a
}
-declare <4 x iXLen> @llvm.lrint.v4iXLen.v4fp128(<4 x fp128>)
define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) {
; LE-I32-LABEL: lrint_v8fp128:
@@ -2664,7 +2646,6 @@ define <8 x iXLen> @lrint_v8fp128(<8 x fp128> %x) {
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128> %x)
ret <8 x iXLen> %a
}
-declare <8 x iXLen> @llvm.lrint.v8iXLen.v8fp128(<8 x fp128>)
define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) {
; LE-I32-LABEL: lrint_v16fp128:
@@ -3262,4 +3243,3 @@ define <16 x iXLen> @lrint_v16fp128(<16 x fp128> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128> %x)
ret <16 x iXLen> %a
}
-declare <16 x iXLen> @llvm.lrint.v16iXLen.v16fp128(<16 x fp128>)
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
index 288dea0..b043ea1 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressU.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 666, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
index e9abcf9..8219ffd 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressV.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 666, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
index 238f488..31d8dd1 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-AddressW.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 666, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
index 8dc69eb..2bb4af5 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-BorderColor.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 666, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
index b2c8faf..62fda73 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ComparisonFunc.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 666, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
index 758d262..7e8de14 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Filter.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 45, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll
new file mode 100644
index 0000000..8f7ef88
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-Flag.ll
@@ -0,0 +1,19 @@
+; RUN: not opt -passes='print<dxil-root-signature>' %s -S -o - 2>&1 | FileCheck %s
+
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+; CHECK: error: Invalid value for Static Sampler Flag: 4
+; CHECK-NOT: Root Signature Definitions
+
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 3 } ; function, root signature
+!3 = !{ !5 } ; list of root signature elements
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 4 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
index 47d4b52..312e769 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxAnisotropy.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 666, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 855e0c0..80fd208 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 0x7FF8000000000000, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index 812749b..5daaf69 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float 0x7FF8000000000000, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 6898aec..423987b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 6.660000e+02, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
index dc6ee42..af630dc 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-RegisterSpace.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 4294967280, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
index 6cee1dd9..bd752f0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderRegister.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 4294967295, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
index fa5bf12..ca0c02d 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-ShaderVisibility.ll
@@ -16,4 +16,4 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 666, i32 0 }
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
index 1dd470d..77c5c7a 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers.ll
@@ -15,7 +15,7 @@ attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
!dx.rootsignatures = !{!2} ; list of function/root signature pairs
!2 = !{ ptr @main, !3, i32 2 } ; function, root signature
!3 = !{ !5 } ; list of root signature elements
-!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
; DXC: - Name: RTS0
; DXC-NEXT: Size: 76
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll
new file mode 100644
index 0000000..7e56f04
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers_V3.ll
@@ -0,0 +1,42 @@
+; RUN: opt %s -dxil-embed -dxil-globals -S -o - | FileCheck %s
+; RUN: llc %s --filetype=obj -o - | obj2yaml | FileCheck %s --check-prefix=DXC
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+; CHECK: @dx.rts0 = private constant [248 x i8] c"{{.*}}", section "RTS0", align 4
+
+define void @main() #0 {
+entry:
+ ret void
+}
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
+!dx.rootsignatures = !{!2} ; list of function/root signature pairs
+!2 = !{ ptr @main, !3, i32 3 } ; function, root signature
+!3 = !{ !5, !6, !7, !8 } ; list of root signature elements
+!5 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 1 }
+!6 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 43, i32 0, i32 0, i32 2 }
+!7 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 44, i32 0, i32 0, i32 0 }
+!8 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 45, i32 0, i32 0, i32 3 }
+
+; DXC: - Name: RTS0
+; DXC-NEXT: Size: 248
+; DXC-NEXT: RootSignature:
+; DXC-NEXT: Version: 3
+; DXC-NEXT: NumRootParameters: 0
+; DXC-NEXT: RootParametersOffset: 24
+; DXC-NEXT: NumStaticSamplers: 4
+; DXC-NEXT: StaticSamplersOffset: 24
+; DXC-NEXT: Parameters: []
+; DXC-NEXT: Samplers:
+; DXC-LABEL: ShaderRegister: 42
+; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true
+; DXC-LABEL: ShaderRegister: 43
+; DXC: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true
+; DXC-LABEL: ShaderRegister: 44
+; DXC-NOT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES:
+; DXC-NOT: SAMPLER_FLAG_UINT_BORDER_COLOR:
+; DXC-LABEL: ShaderRegister: 45
+; DXC: SAMPLER_FLAG_UINT_BORDER_COLOR: true
+; DXC-NEXT: SAMPLER_FLAG_NON_NORMALIZED_COORDINATES: true
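; Taken together, the samplers above document the version-3 flags field: the
; trailing i32 on each StaticSampler node is a bitfield where bit 0 selects
; SAMPLER_FLAG_UINT_BORDER_COLOR and bit 1 selects
; SAMPLER_FLAG_NON_NORMALIZED_COORDINATES, so 3 sets both and 0 sets neither;
; values above 3 are rejected (see the Invalid-Flag test earlier). A
; hypothetical sampler with both flags set, modeled on !5 above (!s and the
; register number 46 are illustrative):
;
;   !s = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 46, i32 0, i32 0, i32 3 }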
diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
index c244095..b68606d 100644
--- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
+++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-sampler.ll
@@ -10,6 +10,6 @@ entry:
!0 = !{ptr @CSMain, !1, i32 2}
!1 = !{!2, !3}
-!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 }
+!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 }
!3 = !{!"DescriptorTable", i32 0, !4}
!4 = !{!"Sampler", i32 1, i32 42, i32 0, i32 -1, i32 0}
diff --git a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
index 9ac02ebb..7c836e2 100644
--- a/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
+++ b/llvm/test/CodeGen/DirectX/rootsignature-validation-fail-static-sampler-range.ll
@@ -10,5 +10,5 @@ entry:
!0 = !{ptr @CSMain, !1, i32 2}
!1 = !{!2, !3}
-!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0 }
-!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0 }
+!2 = !{ !"StaticSampler", i32 5, i32 4, i32 5, i32 3, float 0x3FF7CCCCC0000000, i32 10, i32 2, i32 1, float -1.270000e+02, float 1.220000e+02, i32 42, i32 0, i32 0, i32 0 }
+!3 = !{ !"StaticSampler", i32 4, i32 2, i32 3, i32 5, float 0x3FF6CCCCC0000000, i32 9, i32 3, i32 2, float -1.280000e+02, float 1.280000e+02, i32 42, i32 0, i32 0, i32 0 }
diff --git a/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll
new file mode 100644
index 0000000..267e365
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/unaligned-vec-store.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128B < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Check that llc does not hit an assertion when the unaligned vector store V6_vS32Ub_npred_ai is generated.
+; CHECK: if (!p{{[0-3]}}) vmemu
+
+target triple = "hexagon-unknown-unknown-elf"
+
+define fastcc void @test(i1 %cmp.i.i) {
+entry:
+ %call.i.i.i172 = load ptr, ptr null, align 4
+ %add.ptr = getelementptr i8, ptr %call.i.i.i172, i32 1
+ store <32 x i32> zeroinitializer, ptr %add.ptr, align 128
+ %add.ptr4.i4 = getelementptr i8, ptr %call.i.i.i172, i32 129
+ br i1 %cmp.i.i, label %common.ret, label %if.end.i.i
+
+common.ret: ; preds = %if.end.i.i, %entry
+ ret void
+
+if.end.i.i: ; preds = %entry
+ store <32 x i32> zeroinitializer, ptr %add.ptr4.i4, align 1
+ br label %common.ret
+}
diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
index 7c9f375..40d36fb 100644
--- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
+++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -97,7 +97,6 @@ entry:
; ALL: lw $[[R0:[0-9]+]], %got(v4f32)(
; ALL: ld.w $w12, 0($[[R0]])
; ALL: move.v $w[[W0:13]], $w12
-; NOODDSPREG: move.v $w[[W0:12]], $w13
; ALL: teqi $zero, 1
; ALL-NOT: st.w
; ALL-NOT: ld.w
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
index 18fb879..21ca041 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
@@ -115,5 +115,150 @@ define ptx_kernel void @inlineasm(ptr %p) {
store <2 x float> %mul, ptr %p, align 8
ret void
}
+
+define ptx_kernel void @trunc_v2i32(<2 x i32> %0) {
+; CHECK-SM90A-LABEL: trunc_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b32 %r<7>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<2>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b32 {%r1, %r2}, [trunc_v2i32_param_0];
+; CHECK-SM90A-NEXT: prmt.b32 %r3, %r1, %r2, 0x3340U;
+; CHECK-SM90A-NEXT: mov.b32 %r4, 0;
+; CHECK-SM90A-NEXT: prmt.b32 %r5, %r4, 0, 0x3340U;
+; CHECK-SM90A-NEXT: prmt.b32 %r6, %r5, %r3, 0x5410U;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r6;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: trunc_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<7>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<3>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b64 %rd1, [trunc_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM100-NEXT: mov.b32 %r3, 0;
+; CHECK-SM100-NEXT: prmt.b32 %r4, %r3, 0, 0x3340U;
+; CHECK-SM100-NEXT: prmt.b32 %r5, %r1, %r2, 0x3340U;
+; CHECK-SM100-NEXT: prmt.b32 %r6, %r4, %r5, 0x5410U;
+; CHECK-SM100-NEXT: mov.b64 %rd2, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd2], %r6;
+; CHECK-SM100-NEXT: ret;
+ %2 = trunc <2 x i32> %0 to <2 x i8>
+ %3 = shufflevector <2 x i8> zeroinitializer, <2 x i8> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i8> %3, ptr null, align 4
+ ret void
+}
+
+define ptx_kernel void @zextend_to_v2i32(<2 x i8> %0) {
+; CHECK-SM90A-LABEL: zextend_to_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM90A-NEXT: .reg .b32 %r<4>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0];
+; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1;
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r3, %rs2;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 12;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r3;
+; CHECK-SM90A-NEXT: mov.b64 %rd2, 8;
+; CHECK-SM90A-NEXT: st.b32 [%rd2], %r2;
+; CHECK-SM90A-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM90A-NEXT: st.b32 [%rd3], 0;
+; CHECK-SM90A-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd4], 0;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: zextend_to_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM100-NEXT: .reg .b32 %r<5>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [zextend_to_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r3, %rs1;
+; CHECK-SM100-NEXT: mov.b64 %rd1, {%r3, %r2};
+; CHECK-SM100-NEXT: mov.b32 %r4, 0;
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r4, %r4};
+; CHECK-SM100-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd5, 8;
+; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1;
+; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32;
+; CHECK-SM100-NEXT: mov.b64 %rd7, 12;
+; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6;
+; CHECK-SM100-NEXT: ret;
+ %2 = zext <2 x i8> %0 to <2 x i32>
+ %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i32> %3, ptr null, align 4
+ ret void
+}
+
+define ptx_kernel void @sextend_to_v2i32(<2 x i8> %0) {
+; CHECK-SM90A-LABEL: sextend_to_v2i32(
+; CHECK-SM90A: {
+; CHECK-SM90A-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM90A-NEXT: .reg .b32 %r<6>;
+; CHECK-SM90A-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT: // %bb.0:
+; CHECK-SM90A-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0];
+; CHECK-SM90A-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r2, %rs1;
+; CHECK-SM90A-NEXT: cvt.s32.s8 %r3, %r2;
+; CHECK-SM90A-NEXT: cvt.u32.u16 %r4, %rs2;
+; CHECK-SM90A-NEXT: cvt.s32.s8 %r5, %r4;
+; CHECK-SM90A-NEXT: mov.b64 %rd1, 12;
+; CHECK-SM90A-NEXT: st.b32 [%rd1], %r5;
+; CHECK-SM90A-NEXT: mov.b64 %rd2, 8;
+; CHECK-SM90A-NEXT: st.b32 [%rd2], %r3;
+; CHECK-SM90A-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM90A-NEXT: st.b32 [%rd3], 0;
+; CHECK-SM90A-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM90A-NEXT: st.b32 [%rd4], 0;
+; CHECK-SM90A-NEXT: ret;
+;
+; CHECK-SM100-LABEL: sextend_to_v2i32(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b16 %rs<3>;
+; CHECK-SM100-NEXT: .reg .b32 %r<7>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b8 {%rs1, %rs2}, [sextend_to_v2i32_param_0];
+; CHECK-SM100-NEXT: mov.b32 %r1, {%rs1, %rs2};
+; CHECK-SM100-NEXT: cvt.u32.u16 %r2, %rs2;
+; CHECK-SM100-NEXT: cvt.s32.s8 %r3, %r2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r4, %rs1;
+; CHECK-SM100-NEXT: cvt.s32.s8 %r5, %r4;
+; CHECK-SM100-NEXT: mov.b64 %rd1, {%r5, %r3};
+; CHECK-SM100-NEXT: mov.b32 %r6, 0;
+; CHECK-SM100-NEXT: mov.b64 %rd2, {%r6, %r6};
+; CHECK-SM100-NEXT: mov.b64 %rd3, 4;
+; CHECK-SM100-NEXT: st.b32 [%rd3], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd4, 0;
+; CHECK-SM100-NEXT: st.b32 [%rd4], %rd2;
+; CHECK-SM100-NEXT: mov.b64 %rd5, 8;
+; CHECK-SM100-NEXT: st.b32 [%rd5], %rd1;
+; CHECK-SM100-NEXT: shr.u64 %rd6, %rd1, 32;
+; CHECK-SM100-NEXT: mov.b64 %rd7, 12;
+; CHECK-SM100-NEXT: st.b32 [%rd7], %rd6;
+; CHECK-SM100-NEXT: ret;
+ %2 = sext <2 x i8> %0 to <2 x i32>
+ %3 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ store <4 x i32> %3, ptr null, align 4
+ ret void
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
index a2ad294..98314a0 100644
--- a/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/PowerPC/urem-vector-lkk.ll
@@ -897,31 +897,31 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) {
; P8LE-NEXT: mfvsrd r6, v2
; P8LE-NEXT: mfvsrd r8, v3
; P8LE-NEXT: ori r3, r3, 51289
+; P8LE-NEXT: mffprd r4, f0
; P8LE-NEXT: ori r5, r5, 42889
-; P8LE-NEXT: rldic r4, r3, 36, 1
-; P8LE-NEXT: mffprd r3, f0
+; P8LE-NEXT: rldic r3, r3, 36, 1
; P8LE-NEXT: rldic r5, r5, 35, 1
; P8LE-NEXT: rldicl r7, r6, 63, 1
-; P8LE-NEXT: oris r4, r4, 45590
+; P8LE-NEXT: oris r3, r3, 45590
; P8LE-NEXT: oris r5, r5, 1603
-; P8LE-NEXT: ori r4, r4, 17097
+; P8LE-NEXT: ori r3, r3, 17097
; P8LE-NEXT: ori r5, r5, 21445
-; P8LE-NEXT: mulhdu r4, r3, r4
+; P8LE-NEXT: mulhdu r3, r4, r3
; P8LE-NEXT: mulhdu r5, r7, r5
-; P8LE-NEXT: sub r7, r3, r4
+; P8LE-NEXT: sub r7, r4, r3
; P8LE-NEXT: rldicl r5, r5, 57, 7
; P8LE-NEXT: rldicl r7, r7, 63, 1
; P8LE-NEXT: mulli r5, r5, 654
-; P8LE-NEXT: add r4, r7, r4
+; P8LE-NEXT: add r3, r7, r3
; P8LE-NEXT: lis r7, -16037
; P8LE-NEXT: ori r7, r7, 28749
-; P8LE-NEXT: rldicl r4, r4, 60, 4
+; P8LE-NEXT: rldicl r3, r3, 60, 4
; P8LE-NEXT: sub r5, r6, r5
; P8LE-NEXT: rldic r7, r7, 32, 0
-; P8LE-NEXT: mulli r4, r4, 23
+; P8LE-NEXT: mulli r3, r3, 23
; P8LE-NEXT: oris r7, r7, 52170
; P8LE-NEXT: ori r7, r7, 12109
-; P8LE-NEXT: sub r3, r3, r4
+; P8LE-NEXT: sub r3, r4, r3
; P8LE-NEXT: mulhdu r7, r8, r7
; P8LE-NEXT: mtfprd f1, r3
; P8LE-NEXT: li r3, 0
diff --git a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
index 435b0ab..816b12e 100644
--- a/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_conv_i64_to_fp32_elts.ll
@@ -35,12 +35,12 @@ define i64 @test2elt(<2 x i64> %a) local_unnamed_addr #0 {
;
; CHECK-BE-LABEL: test2elt:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xscvuxdsp f0, v2
+; CHECK-BE-NEXT: xscvdpspn v3, f0
; CHECK-BE-NEXT: xxswapd vs0, v2
-; CHECK-BE-NEXT: xscvuxdsp f1, v2
; CHECK-BE-NEXT: xscvuxdsp f0, f0
-; CHECK-BE-NEXT: xscvdpspn v2, f1
-; CHECK-BE-NEXT: xscvdpspn v3, f0
-; CHECK-BE-NEXT: vmrgow v2, v2, v3
+; CHECK-BE-NEXT: xscvdpspn v2, f0
+; CHECK-BE-NEXT: vmrgow v2, v3, v2
; CHECK-BE-NEXT: mfvsrd r3, v2
; CHECK-BE-NEXT: blr
entry:
@@ -327,12 +327,12 @@ define i64 @test2elt_signed(<2 x i64> %a) local_unnamed_addr #0 {
;
; CHECK-BE-LABEL: test2elt_signed:
; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xscvsxdsp f0, v2
+; CHECK-BE-NEXT: xscvdpspn v3, f0
; CHECK-BE-NEXT: xxswapd vs0, v2
-; CHECK-BE-NEXT: xscvsxdsp f1, v2
; CHECK-BE-NEXT: xscvsxdsp f0, f0
-; CHECK-BE-NEXT: xscvdpspn v2, f1
-; CHECK-BE-NEXT: xscvdpspn v3, f0
-; CHECK-BE-NEXT: vmrgow v2, v2, v3
+; CHECK-BE-NEXT: xscvdpspn v2, f0
+; CHECK-BE-NEXT: vmrgow v2, v3, v2
; CHECK-BE-NEXT: mfvsrd r3, v2
; CHECK-BE-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
index 9a1ed8f..1d5d918 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/atomic-load-store.ll
@@ -37,7 +37,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA: # %bb.0:
-; RV32IA-NEXT: lb a0, 0(a0)
+; RV32IA-NEXT: lbu a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_unordered:
@@ -52,7 +52,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA: # %bb.0:
-; RV64IA-NEXT: lb a0, 0(a0)
+; RV64IA-NEXT: lbu a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, ptr %a unordered, align 1
ret i8 %1
@@ -71,7 +71,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA: # %bb.0:
-; RV32IA-NEXT: lb a0, 0(a0)
+; RV32IA-NEXT: lbu a0, 0(a0)
; RV32IA-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_monotonic:
@@ -86,7 +86,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA: # %bb.0:
-; RV64IA-NEXT: lb a0, 0(a0)
+; RV64IA-NEXT: lbu a0, 0(a0)
; RV64IA-NEXT: ret
%1 = load atomic i8, ptr %a monotonic, align 1
ret i8 %1
@@ -105,13 +105,13 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
;
; RV32IA-WMO-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO: # %bb.0:
-; RV32IA-WMO-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-NEXT: fence r, rw
; RV32IA-WMO-NEXT: ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO: # %bb.0:
-; RV32IA-TSO-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_acquire:
@@ -126,35 +126,35 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
;
; RV64IA-WMO-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO: # %bb.0:
-; RV64IA-WMO-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-NEXT: fence r, rw
; RV64IA-WMO-NEXT: ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO: # %bb.0:
-; RV64IA-TSO-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-NEXT: ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
-; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_acquire:
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
-; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
%1 = load atomic i8, ptr %a acquire, align 1
ret i8 %1
@@ -174,14 +174,14 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, rw
-; RV32IA-WMO-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-NEXT: fence r, rw
; RV32IA-WMO-NEXT: ret
;
; RV32IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO: # %bb.0:
; RV32IA-TSO-NEXT: fence rw, rw
-; RV32IA-TSO-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-NEXT: ret
;
; RV64I-LABEL: atomic_load_i8_seq_cst:
@@ -197,40 +197,40 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
-; RV64IA-WMO-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-NEXT: fence r, rw
; RV64IA-WMO-NEXT: ret
;
; RV64IA-TSO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO: # %bb.0:
; RV64IA-TSO-NEXT: fence rw, rw
-; RV64IA-TSO-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-NEXT: ret
;
; RV32IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO-TRAILING-FENCE: # %bb.0:
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV32IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV32IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV32IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV32IA-TSO-TRAILING-FENCE: # %bb.0:
; RV32IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV32IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV32IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV32IA-TSO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-WMO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO-TRAILING-FENCE: # %bb.0:
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV64IA-WMO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-WMO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-WMO-TRAILING-FENCE-NEXT: fence r, rw
; RV64IA-WMO-TRAILING-FENCE-NEXT: ret
;
; RV64IA-TSO-TRAILING-FENCE-LABEL: atomic_load_i8_seq_cst:
; RV64IA-TSO-TRAILING-FENCE: # %bb.0:
; RV64IA-TSO-TRAILING-FENCE-NEXT: fence rw, rw
-; RV64IA-TSO-TRAILING-FENCE-NEXT: lb a0, 0(a0)
+; RV64IA-TSO-TRAILING-FENCE-NEXT: lbu a0, 0(a0)
; RV64IA-TSO-TRAILING-FENCE-NEXT: ret
%1 = load atomic i8, ptr %a seq_cst, align 1
ret i8 %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index 7204064..f1d17f9f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -505,6 +505,9 @@
# DEBUG-NEXT: G_FREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
+# DEBUG-NEXT: G_FMODF (opcode {{[0-9]+}}): 1 type index, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FPOW (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
@@ -607,11 +610,11 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_FMINIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode 219 is aliased to 183
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_FMAXIMUMNUM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode 220 is aliased to 183
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. the first uncovered type index: 1, OK
# DEBUG-NEXT: .. the first uncovered imm index: 0, OK
# DEBUG-NEXT: G_GET_FPENV (opcode {{[0-9]+}}): 1 type index, 0 imm indices
diff --git a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
index f8b1d50..edec1d0 100644
--- a/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/cmov-branch-opt.ll
@@ -11,6 +11,8 @@
; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-NOZICOND,SFB-NOZICOND-C %s
; RUN: llc -mtriple=riscv64 -mattr=+short-forward-branch-opt,+zicond -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=SHORT_FORWARD,SFB-ZICOND %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; The conditional move optimization in sifive-p450 requires that only a
; single c.mv instruction appears in the branch shadow.
@@ -42,6 +44,14 @@ define signext i32 @test1(i32 signext %x, i32 signext %y, i32 signext %z) {
; SHORT_FORWARD-NEXT: xor a0, a0, a1
; SHORT_FORWARD-NEXT: .LBB0_2:
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test1:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: bnez a2, .LBB0_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB0_2:
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %x, %y
%b = select i1 %c, i32 %a, i32 %x
@@ -73,6 +83,14 @@ define signext i32 @test2(i32 signext %x, i32 signext %y, i32 signext %z) {
; SHORT_FORWARD-NEXT: xor a0, a0, a1
; SHORT_FORWARD-NEXT: .LBB1_2:
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test2:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: beqz a2, .LBB1_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB1_2:
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %x, %y
%b = select i1 %c, i32 %x, i32 %a
@@ -120,6 +138,19 @@ define signext i32 @test3(i32 signext %v, i32 signext %w, i32 signext %x, i32 si
; SHORT_FORWARD-NEXT: .LBB2_4:
; SHORT_FORWARD-NEXT: addw a0, a0, a2
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: test3:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: beqz a4, .LBB2_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB2_2:
+; RV32IXQCI-NEXT: beqz a4, .LBB2_4
+; RV32IXQCI-NEXT: # %bb.3:
+; RV32IXQCI-NEXT: xor a2, a2, a3
+; RV32IXQCI-NEXT: .LBB2_4:
+; RV32IXQCI-NEXT: add a0, a0, a2
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = xor i32 %v, %w
%b = select i1 %c, i32 %v, i32 %a
@@ -167,6 +198,12 @@ define signext i32 @test4(i32 signext %x, i32 signext %y, i32 signext %z) {
; SFB-ZICOND-NEXT: li a0, 3
; SFB-ZICOND-NEXT: czero.nez a0, a0, a2
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: test4:
+; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: li a0, 0
+; RV32IXQCI-NEXT: qc.lieqi a0, a2, 0, 3
+; RV32IXQCI-NEXT: ret
%c = icmp eq i32 %z, 0
%a = select i1 %c, i32 3, i32 0
ret i32 %a
@@ -199,6 +236,15 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
; SHORT_FORWARD-NEXT: xori a0, a0, 43
; SHORT_FORWARD-NEXT: .LBB4_2: # %entry
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB4_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB4_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -236,6 +282,15 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) {
; SHORT_FORWARD-NEXT: xori a0, a0, 43
; SHORT_FORWARD-NEXT: .LBB5_2: # %entry
; SHORT_FORWARD-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_1b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB5_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB5_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -289,6 +344,15 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: xor a0, a1, a0
; SFB-ZICOND-NEXT: .LBB6_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB6_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB6_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -344,6 +408,15 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: xor a0, a1, a0
; SFB-ZICOND-NEXT: .LBB7_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_xor_2b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB7_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB7_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -397,6 +470,15 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB8_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB8_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB8_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
@@ -452,6 +534,15 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB9_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB9_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB9_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
@@ -505,6 +596,15 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB10_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB10_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB10_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
%cmp10 = icmp eq i32 %and, 0
@@ -560,6 +660,15 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
; SFB-ZICOND-NEXT: or a0, a1, a0
; SFB-ZICOND-NEXT: .LBB11_2: # %entry
; SFB-ZICOND-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_or_1b:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB11_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB11_2: # %entry
+; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
%cmp10 = icmp ne i32 %and, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/expandload.ll b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
index 9173fa4..cc1282a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/expandload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/expandload.ll
@@ -1666,20 +1666,20 @@ define <512 x i8> @test_expandload_v512i8_vlen512(ptr %base, <512 x i1> %mask, <
; CHECK-RV32-NEXT: .LBB61_32: # %else114
; CHECK-RV32-NEXT: slli a2, a3, 1
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vsrl.vx v16, v0, a1
+; CHECK-RV32-NEXT: vsrl.vx v24, v0, a1
; CHECK-RV32-NEXT: bgez a2, .LBB61_34
; CHECK-RV32-NEXT: # %bb.33: # %cond.load117
; CHECK-RV32-NEXT: lbu a2, 0(a0)
-; CHECK-RV32-NEXT: vmv8r.v v24, v8
+; CHECK-RV32-NEXT: vmv8r.v v16, v8
; CHECK-RV32-NEXT: vmv.s.x v9, a2
; CHECK-RV32-NEXT: vsetivli zero, 31, e8, m1, tu, ma
; CHECK-RV32-NEXT: vslideup.vi v8, v9, 30
; CHECK-RV32-NEXT: addi a0, a0, 1
-; CHECK-RV32-NEXT: vmv1r.v v24, v8
-; CHECK-RV32-NEXT: vmv8r.v v8, v24
+; CHECK-RV32-NEXT: vmv1r.v v16, v8
+; CHECK-RV32-NEXT: vmv8r.v v8, v16
; CHECK-RV32-NEXT: .LBB61_34: # %else118
; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vmv.x.s a2, v16
+; CHECK-RV32-NEXT: vmv.x.s a2, v24
; CHECK-RV32-NEXT: bgez a3, .LBB61_35
; CHECK-RV32-NEXT: j .LBB61_572
; CHECK-RV32-NEXT: .LBB61_35: # %else122
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 1d691b1..a2fcd79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -661,8 +661,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v16, v8, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t3, 36
-; RV32-NEXT: mul a7, a7, t3
+; RV32-NEXT: slli a7, a7, 5
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
@@ -682,7 +681,11 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vl8r.v v8, (t1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
-; RV32-NEXT: addi t1, sp, 16
+; RV32-NEXT: csrr t1, vlenb
+; RV32-NEXT: li t2, 44
+; RV32-NEXT: mul t1, t1, t2
+; RV32-NEXT: add t1, sp, t1
+; RV32-NEXT: addi t1, t1, 16
; RV32-NEXT: vs4r.v v8, (t1) # vscale x 32-byte Folded Spill
; RV32-NEXT: vmv.s.x v0, a7
; RV32-NEXT: addi a3, a3, 12
@@ -694,8 +697,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t1, 20
-; RV32-NEXT: mul a7, a7, t1
+; RV32-NEXT: slli a7, a7, 4
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
; RV32-NEXT: vs8r.v v8, (a7) # vscale x 64-byte Folded Spill
@@ -733,7 +735,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a7, vlenb
-; RV32-NEXT: li t0, 28
+; RV32-NEXT: li t0, 24
; RV32-NEXT: mul a7, a7, t0
; RV32-NEXT: add a7, sp, a7
; RV32-NEXT: addi a7, a7, 16
@@ -755,7 +757,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vmerge.vvm v8, v24, v8, v0
; RV32-NEXT: csrr a6, vlenb
-; RV32-NEXT: li a7, 44
+; RV32-NEXT: li a7, 40
; RV32-NEXT: mul a6, a6, a7
; RV32-NEXT: add a6, sp, a6
; RV32-NEXT: addi a6, a6, 16
@@ -772,24 +774,19 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a4, 12
-; RV32-NEXT: mul a1, a1, a4
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: vmv.s.x v0, a3
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 36
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v24, v8, v6
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v24, (a1) # vscale x 64-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 92
@@ -812,8 +809,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 20
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
@@ -835,12 +831,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 84
-; RV32-NEXT: mul a1, a1, a2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
-; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 72
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
@@ -860,30 +850,36 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs4r.v v28, (a1) # vscale x 32-byte Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 60
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vmv.v.v v20, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: li a2, 60
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vmv.v.v v16, v8
+; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 60
+; RV32-NEXT: li a2, 44
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl4r.v v8, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vl4r.v v16, (a1) # vscale x 32-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v28, v8, v3
+; RV32-NEXT: vrgatherei16.vv v20, v16, v3
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v28, v24
+; RV32-NEXT: vmv.v.v v20, v24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 6
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs4r.v v20, (a1) # vscale x 32-byte Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI27_4)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_4)
; RV32-NEXT: lui a2, %hi(.LCPI27_5)
@@ -891,13 +887,25 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v24, (a2)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v16, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 84
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v16, (a1) # vscale x 8-byte Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI27_7)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_7)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: vle16.v v16, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 28
+; RV32-NEXT: li a2, 76
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs2r.v v16, (a1) # vscale x 16-byte Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -909,18 +917,29 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v20, (a1) # vscale x 32-byte Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 84
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v7, (a1) # vscale x 8-byte Folded Reload
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vrgatherei16.vv v24, v20, v8
+; RV32-NEXT: vrgatherei16.vv v24, v20, v7
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v24, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 12
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 76
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl2r.v v28, (a1) # vscale x 16-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vrgatherei16.vv v16, v0, v10
+; RV32-NEXT: vrgatherei16.vv v16, v0, v28
; RV32-NEXT: lui a1, %hi(.LCPI27_6)
; RV32-NEXT: addi a1, a1, %lo(.LCPI27_6)
; RV32-NEXT: lui a2, %hi(.LCPI27_8)
@@ -934,7 +953,7 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vle16.v v5, (a2)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 44
+; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
@@ -942,12 +961,6 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vrgatherei16.vv v0, v20, v4
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
; RV32-NEXT: vmv.v.v v0, v16
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 84
-; RV32-NEXT: mul a1, a1, a2
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vrgatherei16.vv v16, v8, v6
; RV32-NEXT: csrr a1, vlenb
@@ -968,7 +981,12 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi a1, a0, 192
; RV32-NEXT: vse32.v v24, (a1)
; RV32-NEXT: addi a1, a0, 128
-; RV32-NEXT: vse32.v v28, (a1)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 6
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl4r.v v8, (a2) # vscale x 32-byte Folded Reload
+; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: addi a1, a0, 64
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 60
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index d995a31..acc6849 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -416,14 +416,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16
-; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16
+; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv32i1:
@@ -437,14 +437,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-256-NEXT: vrgather.vv v11, v12, v16
-; RV32-BITS-256-NEXT: vrgather.vv v10, v13, v16
-; RV32-BITS-256-NEXT: vrgather.vv v9, v14, v16
-; RV32-BITS-256-NEXT: vrgather.vv v8, v15, v16
+; RV32-BITS-256-NEXT: vrgather.vv v15, v8, v16
+; RV32-BITS-256-NEXT: vrgather.vv v14, v9, v16
+; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v16
+; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v16
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv32i1:
@@ -458,14 +458,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0
+; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-BITS-512-NEXT: vrgather.vv v11, v12, v16
-; RV32-BITS-512-NEXT: vrgather.vv v10, v13, v16
-; RV32-BITS-512-NEXT: vrgather.vv v9, v14, v16
-; RV32-BITS-512-NEXT: vrgather.vv v8, v15, v16
+; RV32-BITS-512-NEXT: vrgather.vv v15, v8, v16
+; RV32-BITS-512-NEXT: vrgather.vv v14, v9, v16
+; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v16
+; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v16
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1:
@@ -479,14 +479,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v11, v12, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v13, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v14, v16
-; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v15, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v15, v8, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v14, v9, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v16
+; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v16
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv32i1:
@@ -500,14 +500,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-256-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-256-NEXT: vrgather.vv v11, v12, v16
-; RV64-BITS-256-NEXT: vrgather.vv v10, v13, v16
-; RV64-BITS-256-NEXT: vrgather.vv v9, v14, v16
-; RV64-BITS-256-NEXT: vrgather.vv v8, v15, v16
+; RV64-BITS-256-NEXT: vrgather.vv v15, v8, v16
+; RV64-BITS-256-NEXT: vrgather.vv v14, v9, v16
+; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v16
+; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v16
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv32i1:
@@ -521,14 +521,14 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-512-NEXT: vmerge.vim v12, v8, 1, v0
+; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-BITS-512-NEXT: vrgather.vv v11, v12, v16
-; RV64-BITS-512-NEXT: vrgather.vv v10, v13, v16
-; RV64-BITS-512-NEXT: vrgather.vv v9, v14, v16
-; RV64-BITS-512-NEXT: vrgather.vv v8, v15, v16
+; RV64-BITS-512-NEXT: vrgather.vv v15, v8, v16
+; RV64-BITS-512-NEXT: vrgather.vv v14, v9, v16
+; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v16
+; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v16
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
ret <vscale x 32 x i1> %res
diff --git a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
index 4bc6313..1ee7e13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nontemporal-vp-scalable.ll
@@ -37772,18 +37772,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_P1:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB915_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB915_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB915_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -37791,11 +37791,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_P1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-NEXT: .LBB915_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -38397,18 +38397,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, <
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_PALL:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB916_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB916_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB916_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -38416,11 +38416,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_PALL(<vscale x 64 x i8> %val, <
; CHECK-RV32VC-NEXT: .LBB916_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -39022,18 +39022,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_S1:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB917_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB917_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB917_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -39041,11 +39041,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_S1(<vscale x 64 x i8> %val, <vs
; CHECK-RV32VC-NEXT: .LBB917_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -39647,18 +39647,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_ALL:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB918_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB918_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB918_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -39666,11 +39666,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_ALL(<vscale x 64 x i8> %val, <v
; CHECK-RV32VC-NEXT: .LBB918_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
@@ -40271,18 +40271,18 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val
; CHECK-RV32VC-LABEL: test_nontemporal_vp_scatter_nxv64i8_DEFAULT:
; CHECK-RV32VC: # %bb.0:
; CHECK-RV32VC-NEXT: csrr a1, vlenb
-; CHECK-RV32VC-NEXT: slli a5, a1, 4
+; CHECK-RV32VC-NEXT: slli a6, a1, 4
; CHECK-RV32VC-NEXT: slli a2, a1, 2
-; CHECK-RV32VC-NEXT: slli a6, a1, 3
+; CHECK-RV32VC-NEXT: slli a5, a1, 3
; CHECK-RV32VC-NEXT: mv a4, a3
; CHECK-RV32VC-NEXT: bltu a3, a2, .LBB919_2
; CHECK-RV32VC-NEXT: # %bb.1:
; CHECK-RV32VC-NEXT: mv a4, a2
; CHECK-RV32VC-NEXT: .LBB919_2:
; CHECK-RV32VC-NEXT: vl8re32.v v0, (a0)
-; CHECK-RV32VC-NEXT: add a7, a0, a5
+; CHECK-RV32VC-NEXT: add a6, a6, a0
; CHECK-RV32VC-NEXT: slli a1, a1, 1
-; CHECK-RV32VC-NEXT: add a0, a0, a6
+; CHECK-RV32VC-NEXT: add a0, a0, a5
; CHECK-RV32VC-NEXT: mv a5, a4
; CHECK-RV32VC-NEXT: bltu a4, a1, .LBB919_4
; CHECK-RV32VC-NEXT: # %bb.3:
@@ -40290,11 +40290,11 @@ define void @test_nontemporal_vp_scatter_nxv64i8_DEFAULT(<vscale x 64 x i8> %val
; CHECK-RV32VC-NEXT: .LBB919_4:
; CHECK-RV32VC-NEXT: addi sp, sp, -16
; CHECK-RV32VC-NEXT: .cfi_def_cfa_offset 16
-; CHECK-RV32VC-NEXT: csrr a6, vlenb
-; CHECK-RV32VC-NEXT: slli a6, a6, 3
-; CHECK-RV32VC-NEXT: sub sp, sp, a6
+; CHECK-RV32VC-NEXT: csrr a7, vlenb
+; CHECK-RV32VC-NEXT: slli a7, a7, 3
+; CHECK-RV32VC-NEXT: sub sp, sp, a7
; CHECK-RV32VC-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-RV32VC-NEXT: vl8re32.v v24, (a7)
+; CHECK-RV32VC-NEXT: vl8re32.v v24, (a6)
; CHECK-RV32VC-NEXT: addi a6, sp, 16
; CHECK-RV32VC-NEXT: vs8r.v v24, (a6) # vscale x 64-byte Folded Spill
; CHECK-RV32VC-NEXT: vl8re32.v v24, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/remat.ll b/llvm/test/CodeGen/RISCV/rvv/remat.ll
index 06d54fa..95bff27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/remat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/remat.ll
@@ -301,3 +301,135 @@ define void @vfmv.s.f(ptr %p, double %x) {
store volatile double %x, ptr %p
ret void
}
+
+; This test is fairly fragile, but it's trying to cover the case which
+; caused the revert of bba9172 due to interaction with how rematerialized
+; instructions are pruned from the original live interval. In the result
+; below, we remat the vmv.v.x into the loop, but fail to remat the vmv.v.x
+; a second time after further splitting its live range. We shouldn't need
+; to spill it to the stack at all.
+define i64 @dual_remat(i64 %0, <vscale x 16 x i64> %1, <vscale x 16 x i64> %2, ptr %p) #0 {
+; CHECK-LABEL: dual_remat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a2, a1, 5
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a1, a2, 3
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, a3, a2
+; CHECK-NEXT: vmv.v.i v0, 0
+; CHECK-NEXT: .LBB8_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a5, a4, 4
+; CHECK-NEXT: add a4, a5, a4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vand.vv v16, v16, v8
+; CHECK-NEXT: vmsne.vi v24, v16, 0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs1r.v v24, (a4) # vscale x 8-byte Folded Spill
+; CHECK-NEXT: vand.vv v16, v0, v8
+; CHECK-NEXT: vmsne.vi v8, v16, 0
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a5, a5, a4
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, a4, a5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 4
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl1r.v v9, (a4) # vscale x 8-byte Folded Reload
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslideup.vx v9, v8, a1
+; CHECK-NEXT: vsetvli a4, zero, e8, m2, ta, ma
+; CHECK-NEXT: vcpop.m a4, v9
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a6, a5, 4
+; CHECK-NEXT: add a5, a6, a5
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vs8r.v v8, (a3)
+; CHECK-NEXT: vs8r.v v8, (a2)
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; CHECK-NEXT: vor.vv v16, v16, v8
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # vscale x 64-byte Folded Reload
+; CHECK-NEXT: vor.vv v0, v0, v8
+; CHECK-NEXT: beqz a4, .LBB8_1
+; CHECK-NEXT: # %bb.2: # %middle.block
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a2, a1, 5
+; CHECK-NEXT: add a1, a2, a1
+; CHECK-NEXT: add sp, sp, a1
+; CHECK-NEXT: .cfi_def_cfa sp, 16
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <vscale x 16 x i64> zeroinitializer, i64 %0, i64 0
+ %broadcast.splat = shufflevector <vscale x 16 x i64> %broadcast.splatinsert, <vscale x 16 x i64> zeroinitializer, <vscale x 16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %vec.ind = phi <vscale x 16 x i64> [ zeroinitializer, %entry ], [ %vec.ind.next, %vector.body ]
+ %3 = and <vscale x 16 x i64> %vec.ind, %broadcast.splat
+ %4 = icmp ne <vscale x 16 x i64> %3, zeroinitializer
+ store <vscale x 16 x i64> %broadcast.splat, ptr %p
+ %5 = tail call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %4)
+ %vec.ind.next = or <vscale x 16 x i64> %vec.ind, %1
+ br i1 %5, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %and.i = and i64 1, %0
+ ret i64 %and.i
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 02825b2..19a1841 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -6018,3 +6018,39 @@ vector.latch: ; preds = %for.body419
for.cond.cleanup: ; preds = %vector.latch
ret void
}
+
+;; This is exactly like sink_add_splat except that the splat has operands
+;; which haven't been converted to undef.
+define void @sink_non_canonical_splat(ptr nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_non_canonical_splat:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: .LBB131_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB131_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, ptr %a, i64 %index
+ %wide.load = load <4 x i32>, ptr %0, align 4
+ %1 = add <4 x i32> %wide.load, %broadcast.splat
+ store <4 x i32> %1, ptr %0, align 4
+ %index.next = add nuw i64 %index, 4
+ %2 = icmp eq i64 %index.next, 1024
+ br i1 %2, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll
index 44028a7..550eb94 100644
--- a/llvm/test/CodeGen/RISCV/select-bare.ll
+++ b/llvm/test/CodeGen/RISCV/select-bare.ll
@@ -3,7 +3,7 @@
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -mattr=+xmipscmov -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I-CCMOV %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index b57f625..95f5a9d 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32I %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64I %s
@@ -88,39 +88,38 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
; RV32IXQCI-LABEL: foo:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: lw a2, 0(a1)
-; RV32IXQCI-NEXT: lw a4, 0(a1)
-; RV32IXQCI-NEXT: lw t5, 0(a1)
-; RV32IXQCI-NEXT: lw t4, 0(a1)
-; RV32IXQCI-NEXT: lw t3, 0(a1)
-; RV32IXQCI-NEXT: lw t2, 0(a1)
-; RV32IXQCI-NEXT: lw t0, 0(a1)
-; RV32IXQCI-NEXT: lw a7, 0(a1)
-; RV32IXQCI-NEXT: lw a6, 0(a1)
; RV32IXQCI-NEXT: lw a3, 0(a1)
-; RV32IXQCI-NEXT: lw t1, 0(a1)
+; RV32IXQCI-NEXT: lw a4, 0(a1)
; RV32IXQCI-NEXT: lw a5, 0(a1)
-; RV32IXQCI-NEXT: bltz t1, .LBB0_2
+; RV32IXQCI-NEXT: qc.mvne a0, a0, a2, a2
+; RV32IXQCI-NEXT: qc.mveq a0, a0, a3, a3
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvgeu a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvltu a0, a0, a5, a5
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: qc.mvgeu a0, a0, a2, a2
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvltu a0, a3, a0, a3
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvge a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: qc.mvlt a0, a0, a2, a2
+; RV32IXQCI-NEXT: lw a2, 0(a1)
+; RV32IXQCI-NEXT: qc.mvge a0, a0, a3, a3
+; RV32IXQCI-NEXT: lw a3, 0(a1)
+; RV32IXQCI-NEXT: qc.mvlt a0, a4, a0, a4
+; RV32IXQCI-NEXT: lw a4, 0(a1)
+; RV32IXQCI-NEXT: lw a1, 0(a1)
+; RV32IXQCI-NEXT: blez a2, .LBB0_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a5, 0
-; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0
-; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2
-; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4
-; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5
-; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4
-; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3
-; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2
-; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0
-; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7
-; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6
-; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1
-; RV32IXQCI-NEXT: mv a5, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: .LBB0_2:
-; RV32IXQCI-NEXT: lw a2, 0(a1)
-; RV32IXQCI-NEXT: lw a0, 0(a1)
-; RV32IXQCI-NEXT: li a1, 1024
-; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5
-; RV32IXQCI-NEXT: li a1, 2046
-; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2
+; RV32IXQCI-NEXT: qc.mvlti a0, a2, 0, a3
+; RV32IXQCI-NEXT: li a3, 1024
+; RV32IXQCI-NEXT: qc.mvge a0, a3, a4, a4
+; RV32IXQCI-NEXT: li a3, 2046
+; RV32IXQCI-NEXT: qc.mvgeu a0, a3, a2, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: foo:
diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll
index 3ca0f46..a3c48737 100644
--- a/llvm/test/CodeGen/RISCV/select-cond.ll
+++ b/llvm/test/CodeGen/RISCV/select-cond.ll
@@ -7,7 +7,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32-XQCICM
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32-XQCICS
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV64
diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
index 65d10bb..dfac6e1 100644
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -5,7 +5,7 @@
; RUN: | FileCheck -check-prefixes=RV32,RV32IF %s
; RUN: llc -mtriple=riscv32 -mattr=+zicond -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32ZICOND %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64I %s
@@ -579,9 +579,9 @@ define i32 @select_slt_zero_constant1_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_slt_zero_constant1_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, 10
-; RV32IXQCI-NEXT: addi a0, a0, -3
+; RV32IXQCI-NEXT: li a1, -3
+; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 7
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_slt_zero_constant1_constant2:
@@ -605,9 +605,9 @@ define i32 @select_sgt_negative_one_constant1_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_sgt_negative_one_constant1_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, -10
-; RV32IXQCI-NEXT: addi a0, a0, 7
+; RV32IXQCI-NEXT: li a1, -3
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 7
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_sgt_negative_one_constant1_constant2:
@@ -653,12 +653,10 @@ define i32 @select_nonnegative_lui_addi(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_nonnegative_lui_addi:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: mv a1, a0
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: bgez a1, .LBB21_2
-; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a0, 25
-; RV32IXQCI-NEXT: .LBB21_2:
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_nonnegative_lui_addi:
@@ -726,12 +724,10 @@ define i32 @select_nonnegative_lui_addi_swapped(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_nonnegative_lui_addi_swapped:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB22_2
-; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: ret
-; RV32IXQCI-NEXT: .LBB22_2:
-; RV32IXQCI-NEXT: li a0, 25
+; RV32IXQCI-NEXT: li a2, 25
+; RV32IXQCI-NEXT: lui a1, 4
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_nonnegative_lui_addi_swapped:
@@ -801,13 +797,13 @@ define i32 @diff_shl_addi(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_shl_addi:
; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
; RV32IXQCI-NEXT: bgez a0, .LBB23_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: addi a0, a0, 25
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, 25
; RV32IXQCI-NEXT: .LBB23_2:
-; RV32IXQCI-NEXT: li a0, 25
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: diff_shl_addi:
@@ -876,13 +872,13 @@ define i32 @diff_shl_addi2(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_shl_addi2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB24_2
+; RV32IXQCI-NEXT: lui a2, 4
+; RV32IXQCI-NEXT: li a1, 25
+; RV32IXQCI-NEXT: bltz a0, .LBB24_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li a0, 25
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, 25
; RV32IXQCI-NEXT: .LBB24_2:
-; RV32IXQCI-NEXT: lui a0, 4
-; RV32IXQCI-NEXT: addi a0, a0, 25
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: diff_shl_addi2:
@@ -929,9 +925,10 @@ define i32 @diff_pow2_24_16(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_pow2_24_16:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, -8
-; RV32IXQCI-NEXT: addi a0, a0, 24
+; RV32IXQCI-NEXT: li a2, 24
+; RV32IXQCI-NEXT: li a1, 16
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: diff_pow2_24_16:
@@ -955,9 +952,10 @@ define i32 @diff_pow2_16_24(i32 signext %x) {
;
; RV32IXQCI-LABEL: diff_pow2_16_24:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 28
-; RV32IXQCI-NEXT: andi a0, a0, 8
-; RV32IXQCI-NEXT: addi a0, a0, 16
+; RV32IXQCI-NEXT: li a2, 16
+; RV32IXQCI-NEXT: li a1, 24
+; RV32IXQCI-NEXT: qc.mvgei a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: diff_pow2_16_24:
@@ -1008,14 +1006,14 @@ define i32 @zext_or_constant(i32 signext %x) {
;
; RV32IXQCI-LABEL: zext_or_constant:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB27_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bltz a0, .LBB27_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: xori a1, a2, 1
; RV32IXQCI-NEXT: .LBB27_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: xori a0, a0, 1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: zext_or_constant:
@@ -1095,14 +1093,14 @@ define i32 @zext_or_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: zext_or_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bltz a0, .LBB28_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bgez a0, .LBB28_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: xori a1, a2, 1
; RV32IXQCI-NEXT: .LBB28_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: xori a0, a0, 1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: zext_or_constant2:
@@ -1183,14 +1181,14 @@ define i32 @sext_or_constant(i32 signext %x) {
;
; RV32IXQCI-LABEL: sext_or_constant:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bgez a0, .LBB29_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bltz a0, .LBB29_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, -1
; RV32IXQCI-NEXT: .LBB29_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: sext_or_constant:
@@ -1271,14 +1269,14 @@ define i32 @sext_or_constant2(i32 signext %x) {
;
; RV32IXQCI-LABEL: sext_or_constant2:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: bltz a0, .LBB30_2
+; RV32IXQCI-NEXT: srli a2, a0, 31
+; RV32IXQCI-NEXT: lui a1, 140
+; RV32IXQCI-NEXT: addi a1, a1, 417
+; RV32IXQCI-NEXT: bgez a0, .LBB30_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: lui a0, 140
-; RV32IXQCI-NEXT: addi a0, a0, 417
-; RV32IXQCI-NEXT: ret
+; RV32IXQCI-NEXT: addi a1, a2, -1
; RV32IXQCI-NEXT: .LBB30_2:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: sext_or_constant2:
@@ -1332,9 +1330,9 @@ define i32 @select_0_6(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_0_6:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 2
-; RV32IXQCI-NEXT: srli a0, a0, 30
-; RV32IXQCI-NEXT: slli a0, a0, 1
+; RV32IXQCI-NEXT: li a1, 6
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_0_6:
@@ -1358,9 +1356,9 @@ define i32 @select_6_0(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_6_0:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 6
+; RV32IXQCI-NEXT: li a1, 0
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 6
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_6_0:
@@ -1383,8 +1381,9 @@ define i32 @select_0_394(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_0_394:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srai a0, a0, 31
-; RV32IXQCI-NEXT: andi a0, a0, 394
+; RV32IXQCI-NEXT: li a1, 394
+; RV32IXQCI-NEXT: qc.ligei a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_0_394:
@@ -1407,9 +1406,9 @@ define i32 @select_394_0(i32 signext %x) {
;
; RV32IXQCI-LABEL: select_394_0:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: srli a0, a0, 31
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 394
+; RV32IXQCI-NEXT: li a1, 394
+; RV32IXQCI-NEXT: qc.lilti a1, a0, 0, 0
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_394_0:
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 8273c65..1eb47e4c 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -4,7 +4,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64IMXVTCONDOPS %s
; RUN: llc -mtriple=riscv32 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV32IMZICOND %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECKZICOND,RV64IMZICOND %s
-; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i16 @select_xor_1(i16 %A, i8 %cond) {
@@ -44,10 +44,11 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a1, a1, 31
-; RV32IXQCI-NEXT: srai a1, a1, 31
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB0_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB0_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -102,10 +103,11 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_1b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a1, a1, 31
-; RV32IXQCI-NEXT: srai a1, a1, 31
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: andi a1, a1, 1
+; RV32IXQCI-NEXT: beqz a1, .LBB1_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB1_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -148,10 +150,11 @@ define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB2_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB2_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -196,10 +199,11 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_xor_2b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB3_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB3_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -221,9 +225,10 @@ define i16 @select_xor_3(i16 %A, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_3:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a1, a1, 1
-; RV32IXQCI-NEXT: addi a1, a1, -1
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: bnez a1, .LBB4_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB4_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -247,9 +252,10 @@ define i16 @select_xor_3b(i16 %A, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_3b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a1, a1, 1
-; RV32IXQCI-NEXT: addi a1, a1, -1
-; RV32IXQCI-NEXT: andi a1, a1, 43
-; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: bnez a1, .LBB5_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: xori a0, a0, 43
+; RV32IXQCI-NEXT: .LBB5_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -293,9 +299,10 @@ define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_4:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB6_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB6_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -341,9 +348,10 @@ define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_xor_4b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB7_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: xor a0, a0, a1
+; RV32IXQCI-NEXT: .LBB7_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -382,9 +390,12 @@ define i32 @select_xor_5(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_xor_5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: xori a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB8_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: xori a2, a1, 128
+; RV32IXQCI-NEXT: .LBB8_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = xor i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -424,10 +435,11 @@ define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_or:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB9_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB9_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -472,10 +484,11 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
;
; RV32IXQCI-LABEL: select_or_b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB10_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB10_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -518,10 +531,11 @@ define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
;
; RV32IXQCI-LABEL: select_or_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB11_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB11_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -566,10 +580,11 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
;
; RV32IXQCI-LABEL: select_or_1b:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: slli a2, a2, 31
-; RV32IXQCI-NEXT: srai a2, a2, 31
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: andi a2, a2, 1
+; RV32IXQCI-NEXT: beqz a2, .LBB12_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB12_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -613,9 +628,10 @@ define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_or_2:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB13_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB13_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -661,9 +677,10 @@ define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) {
; RV32IXQCI-LABEL: select_or_2b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB14_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB14_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i8 %cond, 1
@@ -707,9 +724,10 @@ define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) {
; RV32IXQCI-LABEL: select_or_3:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB15_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB15_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -755,9 +773,10 @@ define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) {
; RV32IXQCI-LABEL: select_or_3b:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a2, a2, 1
-; RV32IXQCI-NEXT: addi a2, a2, -1
-; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: bnez a2, .LBB16_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
; RV32IXQCI-NEXT: or a0, a0, a1
+; RV32IXQCI-NEXT: .LBB16_2: # %entry
; RV32IXQCI-NEXT: ret
entry:
%and = and i32 %cond, 1
@@ -796,9 +815,12 @@ define i32 @select_or_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_or_4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: ori a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB17_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: ori a2, a1, 128
+; RV32IXQCI-NEXT: .LBB17_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = or i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -840,9 +862,11 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_add_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: neg a0, a0
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: add a0, a0, a2
+; RV32IXQCI-NEXT: beqz a0, .LBB18_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: add a2, a2, a1
+; RV32IXQCI-NEXT: .LBB18_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, %b
@@ -885,9 +909,11 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_add_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: bnez a0, .LBB19_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: .LBB19_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, %b
@@ -933,9 +959,11 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_add_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 42
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: bnez a0, .LBB20_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: addi a1, a1, 42
+; RV32IXQCI-NEXT: .LBB20_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = add i32 %a, 42
@@ -978,9 +1006,12 @@ define i32 @select_add_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_add_4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a1
-; RV32IXQCI-NEXT: addi a0, a0, 128
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB21_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a2, a1, 128
+; RV32IXQCI-NEXT: .LBB21_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = add i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -1029,12 +1060,14 @@ define i64 @select_add_5(i1 zeroext %cond, i64 %x) {
;
; RV32IXQCI-LABEL: select_add_5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a3, a0, -1
-; RV32IXQCI-NEXT: and a1, a1, a3
-; RV32IXQCI-NEXT: addi a0, a1, 128
-; RV32IXQCI-NEXT: sltu a1, a0, a1
-; RV32IXQCI-NEXT: and a2, a2, a3
-; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: mv a3, a0
+; RV32IXQCI-NEXT: addi a4, a1, 128
+; RV32IXQCI-NEXT: sltu a0, a4, a1
+; RV32IXQCI-NEXT: add a2, a2, a0
+; RV32IXQCI-NEXT: li a0, 128
+; RV32IXQCI-NEXT: qc.mveqi a0, a3, 0, a4
+; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a2, 0
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: ret
%add = add i64 %x, 128
%sel = select i1 %cond, i64 128, i64 %add
@@ -1093,14 +1126,15 @@ define i64 @select_add_6(i1 zeroext %cond, i64 %x) {
;
; RV32IXQCI-LABEL: select_add_6:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a3, a0, -1
+; RV32IXQCI-NEXT: mv a3, a0
; RV32IXQCI-NEXT: lui a0, 14
-; RV32IXQCI-NEXT: and a1, a1, a3
-; RV32IXQCI-NEXT: addi a0, a0, 1005
-; RV32IXQCI-NEXT: add a0, a0, a1
+; RV32IXQCI-NEXT: addi a4, a0, 1005
+; RV32IXQCI-NEXT: add a0, a1, a4
; RV32IXQCI-NEXT: sltu a1, a0, a1
-; RV32IXQCI-NEXT: and a2, a2, a3
; RV32IXQCI-NEXT: add a1, a1, a2
+; RV32IXQCI-NEXT: qc.mvnei a0, a3, 0, a4
+; RV32IXQCI-NEXT: qc.selectieqi a3, 0, a1, 0
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: ret
%add = add i64 %x, 58349
%sel = select i1 %cond, i64 58349, i64 %add
@@ -1152,9 +1186,11 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_sub_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sub a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB24_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sub a2, a1, a2
+; RV32IXQCI-NEXT: .LBB24_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, %b
@@ -1197,9 +1233,11 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_sub_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sub a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB25_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sub a1, a1, a2
+; RV32IXQCI-NEXT: .LBB25_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, %b
@@ -1245,9 +1283,11 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_sub_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: andi a0, a0, 42
-; RV32IXQCI-NEXT: sub a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB26_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: addi a1, a1, -42
+; RV32IXQCI-NEXT: .LBB26_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, 42
@@ -1301,10 +1341,12 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_sub_4:
; RV32IXQCI: # %bb.0:
+; RV32IXQCI-NEXT: li a2, 128
+; RV32IXQCI-NEXT: bnez a0, .LBB27_2
+; RV32IXQCI-NEXT: # %bb.1:
; RV32IXQCI-NEXT: addi a2, a1, -128
-; RV32IXQCI-NEXT: li a1, 128
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: .LBB27_2:
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
%add = sub i32 %x, 128
%sel = select i1 %cond, i32 128, i32 %add
@@ -1347,9 +1389,11 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_and_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: and a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB28_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: and a2, a2, a1
+; RV32IXQCI-NEXT: .LBB28_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, %b
@@ -1392,9 +1436,11 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_and_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: and a2, a2, a1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB29_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: and a1, a1, a2
+; RV32IXQCI-NEXT: .LBB29_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, %b
@@ -1437,9 +1483,11 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
;
; RV32IXQCI-LABEL: select_and_3:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: andi a2, a1, 42
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB30_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: andi a1, a1, 42
+; RV32IXQCI-NEXT: .LBB30_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, 42
@@ -1626,9 +1674,11 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
; RV32IXQCI-NEXT: lui a3, 199729
; RV32IXQCI-NEXT: addi a3, a3, -975
; RV32IXQCI-NEXT: mulhu a2, a2, a3
-; RV32IXQCI-NEXT: srli a2, a2, 2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB33_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srli a1, a2, 2
+; RV32IXQCI-NEXT: .LBB33_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = udiv i32 %a, 42
@@ -1681,9 +1731,11 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_shl_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sll a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB34_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sll a2, a1, a2
+; RV32IXQCI-NEXT: .LBB34_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = shl i32 %a, %b
@@ -1726,9 +1778,11 @@ define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_shl_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sll a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB35_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sll a1, a1, a2
+; RV32IXQCI-NEXT: .LBB35_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = shl i32 %a, %b
@@ -1797,9 +1851,11 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_ashr_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: sra a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB37_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sra a2, a1, a2
+; RV32IXQCI-NEXT: .LBB37_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = ashr i32 %a, %b
@@ -1842,9 +1898,11 @@ define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_ashr_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: sra a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB38_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: sra a1, a1, a2
+; RV32IXQCI-NEXT: .LBB38_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = ashr i32 %a, %b
@@ -1913,9 +1971,11 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_lshr_1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: srl a1, a1, a2
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: beqz a0, .LBB40_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srl a2, a1, a2
+; RV32IXQCI-NEXT: .LBB40_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%c = lshr i32 %a, %b
@@ -1958,9 +2018,11 @@ define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) {
;
; RV32IXQCI-LABEL: select_lshr_2:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: addi a0, a0, -1
-; RV32IXQCI-NEXT: and a0, a0, a2
-; RV32IXQCI-NEXT: srl a0, a1, a0
+; RV32IXQCI-NEXT: bnez a0, .LBB41_2
+; RV32IXQCI-NEXT: # %bb.1: # %entry
+; RV32IXQCI-NEXT: srl a1, a1, a2
+; RV32IXQCI-NEXT: .LBB41_2: # %entry
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = lshr i32 %a, %b
@@ -2304,11 +2366,13 @@ define i32 @select_cst3(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst3:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 7
-; RV32IXQCI-NEXT: lui a2, 5
-; RV32IXQCI-NEXT: addi a3, a1, 1328
-; RV32IXQCI-NEXT: addi a1, a2, -480
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3
+; RV32IXQCI-NEXT: lui a2, 7
+; RV32IXQCI-NEXT: lui a1, 5
+; RV32IXQCI-NEXT: addi a1, a1, -480
+; RV32IXQCI-NEXT: beqz a0, .LBB51_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, 1328
+; RV32IXQCI-NEXT: .LBB51_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 30000, i32 20000
@@ -2370,10 +2434,12 @@ define i32 @select_cst5(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst5:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 1
-; RV32IXQCI-NEXT: addi a2, a1, -2047
+; RV32IXQCI-NEXT: lui a2, 1
; RV32IXQCI-NEXT: li a1, 2047
-; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: bnez a0, .LBB53_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, -2047
+; RV32IXQCI-NEXT: .LBB53_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 2047, i32 2049
@@ -2417,10 +2483,12 @@ define i32 @select_cst5_invert(i1 zeroext %cond) {
;
; RV32IXQCI-LABEL: select_cst5_invert:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lui a1, 1
-; RV32IXQCI-NEXT: addi a2, a1, -2047
+; RV32IXQCI-NEXT: lui a2, 1
; RV32IXQCI-NEXT: li a1, 2047
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2
+; RV32IXQCI-NEXT: beqz a0, .LBB54_2
+; RV32IXQCI-NEXT: # %bb.1:
+; RV32IXQCI-NEXT: addi a1, a2, -2047
+; RV32IXQCI-NEXT: .LBB54_2:
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 2049, i32 2047
diff --git a/llvm/test/CodeGen/RISCV/xqcicli.ll b/llvm/test/CodeGen/RISCV/xqcicli.ll
index 8b97616..8d4caa1 100644
--- a/llvm/test/CodeGen/RISCV/xqcicli.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicli.ll
@@ -4,7 +4,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicli -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICLI
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll
index fb48301..8e93496 100644
--- a/llvm/test/CodeGen/RISCV/xqcicm.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicm.ll
@@ -6,7 +6,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_example(i32 %cond, i32 %x, i32 %y) {
diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll
index 5b7ca9e7..c0839c9 100644
--- a/llvm/test/CodeGen/RISCV/xqcics.ll
+++ b/llvm/test/CodeGen/RISCV/xqcics.ll
@@ -6,7 +6,7 @@
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICS
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics,+experimental-xqcicm -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCICM
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli -verify-machineinstrs < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm,+experimental-xqcics,+experimental-xqcicli,+zca,+short-forward-branch-opt,+conditional-cmv-fusion -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefixes=RV32IXQCI
define i32 @select_cc_example_eq_s1(i32 %a, i32 %b, i32 %x, i32 %y) {
@@ -690,3 +690,127 @@ entry:
ret i32 %sel
}
+define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a1, a0, .LBB21_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a2, 11
+; RV32I-NEXT: .LBB21_2: # %entry
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_eq1:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectieq a0, a1, a2, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq1:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectieq a0, a1, a2, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.line a2, a1, a0, 11
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %b, %a
+ %sel = select i1 %cmp, i32 %x, i32 11
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ne1:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bne a1, a0, .LBB22_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a2, 11
+; RV32I-NEXT: .LBB22_2: # %entry
+; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_ne1:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectine a0, a1, a2, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ne1:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectine a0, a1, a2, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ne1:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.lieq a2, a1, a0, 11
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ne i32 %b, %a
+ %sel = select i1 %cmp, i32 %x, i32 11
+ ret i32 %sel
+}
+
+
+define i32 @select_cc_example_eq2(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq2:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a1, a0, .LBB23_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a0, 11
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB23_2:
+; RV32I-NEXT: li a0, 15
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_eq2:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq2:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.selectiieq a0, a1, 15, 11
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %b, %a
+ %sel = select i1 %cmp, i32 15, i32 11
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ne2(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ne2:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bne a1, a0, .LBB24_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: li a0, 11
+; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB24_2:
+; RV32I-NEXT: li a0, 15
+; RV32I-NEXT: ret
+;
+; RV32IXQCICS-LABEL: select_cc_example_ne2:
+; RV32IXQCICS: # %bb.0: # %entry
+; RV32IXQCICS-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCICS-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ne2:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ne2:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.selectiine a0, a1, 15, 11
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ne i32 %b, %a
+ %sel = select i1 %cmp, i32 15, i32 11
+ ret i32 %sel
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
index cd52498..2964da9 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
@@ -32,6 +32,7 @@
; CHECK-DAG: OpDecorate [[g]] Binding 0
; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10
; CHECK-DAG: OpDecorate [[h]] Binding 3
+; CHECK-NOT: OpDecorate [[h]] Binding 4
; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10
; CHECK-DAG: OpDecorate [[i]] Binding 2
@@ -44,30 +45,34 @@ entry:
%3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, ptr nonnull @.str.6)
%4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, ptr nonnull @.str.8)
%5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, ptr nonnull @.str.10)
- %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, ptr nonnull @.str.12)
- %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14)
- %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
- %9 = load i32, ptr addrspace(11) %8, align 4
- %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
- %11 = load i32, ptr addrspace(11) %10, align 4
- %add.i = add nsw i32 %11, %9
- %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
- %13 = load i32, ptr addrspace(11) %12, align 4
- %add4.i = add nsw i32 %add.i, %13
- %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
- %15 = load i32, ptr addrspace(11) %14, align 4
- %add6.i = add nsw i32 %add4.i, %15
- %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
- %17 = load i32, ptr addrspace(11) %16, align 4
- %add8.i = add nsw i32 %add6.i, %17
- %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
- %19 = load i32, ptr addrspace(11) %18, align 4
- %add10.i = add nsw i32 %add8.i, %19
- %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
- %21 = load i32, ptr addrspace(11) %20, align 4
- %add12.i = add nsw i32 %add10.i, %21
- %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
- store i32 %add12.i, ptr addrspace(11) %22, align 4
+ %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 0, ptr nonnull @.str.12)
+ %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 2, i32 1, ptr nonnull @.str.12)
+ %8 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, ptr nonnull @.str.14)
+ %9 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
+ %10 = load i32, ptr addrspace(11) %9, align 4
+ %11 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
+ %12 = load i32, ptr addrspace(11) %11, align 4
+ %add.i = add nsw i32 %12, %10
+ %13 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ %14 = load i32, ptr addrspace(11) %13, align 4
+ %add4.i = add nsw i32 %add.i, %14
+ %15 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
+ %16 = load i32, ptr addrspace(11) %15, align 4
+ %add6.i = add nsw i32 %add4.i, %16
+ %17 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
+ %18 = load i32, ptr addrspace(11) %17, align 4
+ %add8.i = add nsw i32 %add6.i, %18
+ %19 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
+ %20 = load i32, ptr addrspace(11) %19, align 4
+ %add10.i = add nsw i32 %add8.i, %20
+ %21 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
+ %22 = load i32, ptr addrspace(11) %21, align 4
+ %add12.i = add nsw i32 %add10.i, %22
+ %23 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %8, i32 0)
+ %24 = load i32, ptr addrspace(11) %23, align 4
+ %add14.i = add nsw i32 %add12.i, %24
+ %25 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ store i32 %add14.i, ptr addrspace(11) %25, align 4
ret void
}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll
new file mode 100644
index 0000000..c968c99
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/UniqueImplicitBindingNumber.ll
@@ -0,0 +1,19 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; CHECK-ERROR: LLVM ERROR: Implicit binding calls with the same order ID must have the same descriptor set
+
+@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1
+@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str)
+ %1 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ %2 = load i32, ptr addrspace(11) %1, align 4
+ %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 1, i32 1, i32 0, ptr nonnull @.str.2)
+ %4 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ store i32 %2, ptr addrspace(11) %4, align 4
+ ret void
+}
+
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
index d3d6413..eb7c1b6 100644
--- a/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-cmp-04.ll
@@ -235,7 +235,7 @@ define half @f12_half(half %dummy, half %val, ptr %dest) {
; CHECK-NEXT: blah %f0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT
-; CHECK-NEXT: ltebr %f0, %f0
+; CHECK-NEXT: ltebr %f1, %f0
; CHECK-NEXT: jl .LBB11_2
; CHECK-NEXT:# %bb.1:
; CHECK-NEXT: lgdr %r0, %f8
@@ -344,7 +344,7 @@ define half @f15_half(half %val, half %dummy, ptr %dest) {
; CHECK-NEXT: blah %f2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: brasl %r14, __extendhfsf2@PLT
-; CHECK-NEXT: ltebr %f0, %f0
+; CHECK-NEXT: ltebr %f1, %f0
; CHECK-NEXT: jl .LBB15_2
; CHECK-NEXT:# %bb.1:
; CHECK-NEXT: lgdr %r0, %f8
diff --git a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
index 41d2c02..5a79659 100644
--- a/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-soft-float-abi.ll
@@ -348,38 +348,35 @@ entry:
define <4 x float> @vector_add_f32(<4 x float> %lhs, <4 x float> %rhs) {
; CHECK-MVE-LABEL: vector_add_f32:
; CHECK-MVE: @ %bb.0: @ %entry
-; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, lr}
-; CHECK-MVE-NEXT: push {r4, r5, r6, r7, lr}
-; CHECK-MVE-NEXT: .pad #4
-; CHECK-MVE-NEXT: sub sp, #4
+; CHECK-MVE-NEXT: .save {r4, r5, r6, r7, r8, lr}
+; CHECK-MVE-NEXT: push.w {r4, r5, r6, r7, r8, lr}
; CHECK-MVE-NEXT: .vsave {d8, d9}
; CHECK-MVE-NEXT: vpush {d8, d9}
-; CHECK-MVE-NEXT: mov r4, r0
+; CHECK-MVE-NEXT: mov r8, r0
; CHECK-MVE-NEXT: add r0, sp, #40
; CHECK-MVE-NEXT: vldrw.u32 q4, [r0]
-; CHECK-MVE-NEXT: mov r6, r1
+; CHECK-MVE-NEXT: mov r7, r1
; CHECK-MVE-NEXT: mov r0, r3
-; CHECK-MVE-NEXT: mov r5, r2
-; CHECK-MVE-NEXT: vmov r7, r1, d9
+; CHECK-MVE-NEXT: mov r6, r2
+; CHECK-MVE-NEXT: vmov r4, r1, d9
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s19, r0
-; CHECK-MVE-NEXT: mov r0, r5
-; CHECK-MVE-NEXT: mov r1, r7
-; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov r5, r1, d8
-; CHECK-MVE-NEXT: vmov s18, r0
+; CHECK-MVE-NEXT: mov r5, r0
; CHECK-MVE-NEXT: mov r0, r6
+; CHECK-MVE-NEXT: mov r1, r4
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s17, r0
-; CHECK-MVE-NEXT: mov r0, r4
-; CHECK-MVE-NEXT: mov r1, r5
+; CHECK-MVE-NEXT: vmov r6, r1, d8
+; CHECK-MVE-NEXT: mov r4, r0
+; CHECK-MVE-NEXT: mov r0, r7
; CHECK-MVE-NEXT: bl __aeabi_fadd
-; CHECK-MVE-NEXT: vmov s16, r0
-; CHECK-MVE-NEXT: vmov r2, r3, d9
-; CHECK-MVE-NEXT: vmov r0, r1, d8
+; CHECK-MVE-NEXT: mov r7, r0
+; CHECK-MVE-NEXT: mov r0, r8
+; CHECK-MVE-NEXT: mov r1, r6
+; CHECK-MVE-NEXT: bl __aeabi_fadd
+; CHECK-MVE-NEXT: mov r1, r7
+; CHECK-MVE-NEXT: mov r2, r4
+; CHECK-MVE-NEXT: mov r3, r5
; CHECK-MVE-NEXT: vpop {d8, d9}
-; CHECK-MVE-NEXT: add sp, #4
-; CHECK-MVE-NEXT: pop {r4, r5, r6, r7, pc}
+; CHECK-MVE-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
;
; CHECK-BE-LABEL: vector_add_f32:
; CHECK-BE: @ %bb.0: @ %entry
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 4dd9173..93b5e3f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -33,53 +33,29 @@ entry:
}
define void @vld3_v4i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v4i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9}
-; CHECK-LV-NEXT: vpush {d8, d9}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #16]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #32]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vstrw.32 q0, [r1]
-; CHECK-LV-NEXT: vpop {d8, d9}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v4i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9}
-; CHECK-LIS-NEXT: vpush {d8, d9}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #16]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1]
-; CHECK-LIS-NEXT: vpop {d8, d9}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v4i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9}
+; CHECK-NEXT: vpush {d8, d9}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
+; CHECK-NEXT: vldrw.u32 q1, [r0]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vstrw.32 q0, [r1]
+; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <12 x i32>, ptr %src, align 4
@@ -93,87 +69,46 @@ entry:
}
define void @vld3_v8i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v8i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11}
-; CHECK-LV-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LV-NEXT: vmov.f32 s17, s4
-; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LV-NEXT: vmov.f32 s18, s7
-; CHECK-LV-NEXT: vmov.f32 s22, s6
-; CHECK-LV-NEXT: vmov.f32 s16, s9
-; CHECK-LV-NEXT: vmov.f32 s19, s14
-; CHECK-LV-NEXT: vmov.f32 s20, s8
-; CHECK-LV-NEXT: vmov.f32 s21, s11
-; CHECK-LV-NEXT: vmov.f32 s23, s13
-; CHECK-LV-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LV-NEXT: vmov.f32 s4, s10
-; CHECK-LV-NEXT: vmov.f32 s6, s12
-; CHECK-LV-NEXT: vmov.f32 s7, s15
-; CHECK-LV-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LV-NEXT: vstrw.32 q1, [r1]
-; CHECK-LV-NEXT: vpop {d8, d9, d10, d11}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v8i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LIS-NEXT: vmov.f32 s17, s4
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LIS-NEXT: vmov.f32 s18, s7
-; CHECK-LIS-NEXT: vmov.f32 s22, s6
-; CHECK-LIS-NEXT: vmov.f32 s16, s9
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s20, s8
-; CHECK-LIS-NEXT: vmov.f32 s21, s11
-; CHECK-LIS-NEXT: vmov.f32 s23, s13
-; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LIS-NEXT: vmov.f32 s4, s10
-; CHECK-LIS-NEXT: vmov.f32 s6, s12
-; CHECK-LIS-NEXT: vmov.f32 s7, s15
-; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LIS-NEXT: vstrw.32 q1, [r1]
-; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v8i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vldrw.u32 q2, [r0]
+; CHECK-NEXT: vmov.f32 s17, s4
+; CHECK-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-NEXT: vmov.f32 s18, s7
+; CHECK-NEXT: vmov.f32 s22, s6
+; CHECK-NEXT: vmov.f32 s16, s9
+; CHECK-NEXT: vmov.f32 s19, s14
+; CHECK-NEXT: vmov.f32 s20, s8
+; CHECK-NEXT: vmov.f32 s21, s11
+; CHECK-NEXT: vmov.f32 s23, s13
+; CHECK-NEXT: vadd.i32 q4, q5, q4
+; CHECK-NEXT: vmov.f32 s4, s10
+; CHECK-NEXT: vmov.f32 s6, s12
+; CHECK-NEXT: vmov.f32 s7, s15
+; CHECK-NEXT: vadd.i32 q1, q4, q1
+; CHECK-NEXT: vstrw.32 q1, [r1]
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <24 x i32>, ptr %src, align 4
@@ -187,155 +122,80 @@ entry:
}
define void @vld3_v16i32(ptr %src, ptr %dst) {
-; CHECK-LV-LABEL: vld3_v16i32:
-; CHECK-LV: @ %bb.0: @ %entry
-; CHECK-LV-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #80]
-; CHECK-LV-NEXT: vldrw.u32 q6, [r0, #176]
-; CHECK-LV-NEXT: vmov.f32 s10, s2
-; CHECK-LV-NEXT: vmov.f32 s13, s0
-; CHECK-LV-NEXT: vmov.f32 s14, s3
-; CHECK-LV-NEXT: vmov.f32 s8, s4
-; CHECK-LV-NEXT: vmov.f32 s9, s7
-; CHECK-LV-NEXT: vmov.f32 s12, s5
-; CHECK-LV-NEXT: vmov.f32 s15, s18
-; CHECK-LV-NEXT: vmov.f32 s11, s17
-; CHECK-LV-NEXT: vadd.i32 q2, q2, q3
-; CHECK-LV-NEXT: vmov.f32 s0, s6
-; CHECK-LV-NEXT: vmov.f32 s2, s16
-; CHECK-LV-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LV-NEXT: vmov.f32 s3, s19
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LV-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LV-NEXT: vmov.f32 s17, s4
-; CHECK-LV-NEXT: vmov.f32 s18, s7
-; CHECK-LV-NEXT: vmov.f32 s22, s6
-; CHECK-LV-NEXT: vmov.f32 s16, s9
-; CHECK-LV-NEXT: vmov.f32 s19, s14
-; CHECK-LV-NEXT: vmov.f32 s20, s8
-; CHECK-LV-NEXT: vmov.f32 s21, s11
-; CHECK-LV-NEXT: vmov.f32 s23, s13
-; CHECK-LV-NEXT: vmov.f32 s4, s10
-; CHECK-LV-NEXT: vldrw.u32 q2, [r0, #160]
-; CHECK-LV-NEXT: vmov.f32 s6, s12
-; CHECK-LV-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LV-NEXT: vmov.f32 s7, s15
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #144]
-; CHECK-LV-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LV-NEXT: vmov.f32 s18, s10
-; CHECK-LV-NEXT: vmov.f32 s21, s8
-; CHECK-LV-NEXT: vmov.f32 s22, s11
-; CHECK-LV-NEXT: vmov.f32 s16, s12
-; CHECK-LV-NEXT: vmov.f32 s17, s15
-; CHECK-LV-NEXT: vmov.f32 s20, s13
-; CHECK-LV-NEXT: vmov.f32 s23, s26
-; CHECK-LV-NEXT: vmov.f32 s19, s25
-; CHECK-LV-NEXT: vadd.i32 q4, q4, q5
-; CHECK-LV-NEXT: vmov.f32 s8, s14
-; CHECK-LV-NEXT: vmov.f32 s10, s24
-; CHECK-LV-NEXT: vldrw.u32 q3, [r0, #112]
-; CHECK-LV-NEXT: vmov.f32 s11, s27
-; CHECK-LV-NEXT: vldrw.u32 q5, [r0, #128]
-; CHECK-LV-NEXT: vadd.i32 q2, q4, q2
-; CHECK-LV-NEXT: vldrw.u32 q4, [r0, #96]
-; CHECK-LV-NEXT: vmov.f32 s25, s12
-; CHECK-LV-NEXT: vstrw.32 q2, [r1, #48]
-; CHECK-LV-NEXT: vmov.f32 s26, s15
-; CHECK-LV-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LV-NEXT: vmov.f32 s30, s14
-; CHECK-LV-NEXT: vstrw.32 q1, [r1]
-; CHECK-LV-NEXT: vmov.f32 s24, s17
-; CHECK-LV-NEXT: vmov.f32 s27, s22
-; CHECK-LV-NEXT: vmov.f32 s28, s16
-; CHECK-LV-NEXT: vmov.f32 s29, s19
-; CHECK-LV-NEXT: vmov.f32 s31, s21
-; CHECK-LV-NEXT: vadd.i32 q6, q7, q6
-; CHECK-LV-NEXT: vmov.f32 s12, s18
-; CHECK-LV-NEXT: vmov.f32 s14, s20
-; CHECK-LV-NEXT: vmov.f32 s15, s23
-; CHECK-LV-NEXT: vadd.i32 q3, q6, q3
-; CHECK-LV-NEXT: vstrw.32 q3, [r1, #32]
-; CHECK-LV-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LV-NEXT: bx lr
-;
-; CHECK-LIS-LABEL: vld3_v16i32:
-; CHECK-LIS: @ %bb.0: @ %entry
-; CHECK-LIS-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: vldrw.u32 q0, [r0, #64]
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #48]
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-LIS-NEXT: vmov.f32 s10, s2
-; CHECK-LIS-NEXT: vmov.f32 s17, s0
-; CHECK-LIS-NEXT: vmov.f32 s18, s3
-; CHECK-LIS-NEXT: vmov.f32 s8, s4
-; CHECK-LIS-NEXT: vmov.f32 s9, s7
-; CHECK-LIS-NEXT: vmov.f32 s16, s5
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s11, s13
-; CHECK-LIS-NEXT: vmov.f32 s0, s6
-; CHECK-LIS-NEXT: vadd.i32 q2, q2, q4
-; CHECK-LIS-NEXT: vmov.f32 s2, s12
-; CHECK-LIS-NEXT: vldrw.u32 q1, [r0, #16]
-; CHECK-LIS-NEXT: vmov.f32 s3, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #32]
-; CHECK-LIS-NEXT: vadd.i32 q0, q2, q0
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0]
-; CHECK-LIS-NEXT: vmov.f32 s17, s4
-; CHECK-LIS-NEXT: vmov.f32 s18, s7
-; CHECK-LIS-NEXT: vmov.f32 s22, s6
-; CHECK-LIS-NEXT: vmov.f32 s16, s9
-; CHECK-LIS-NEXT: vmov.f32 s19, s14
-; CHECK-LIS-NEXT: vmov.f32 s20, s8
-; CHECK-LIS-NEXT: vmov.f32 s21, s11
-; CHECK-LIS-NEXT: vmov.f32 s23, s13
-; CHECK-LIS-NEXT: vadd.i32 q4, q5, q4
-; CHECK-LIS-NEXT: vmov.f32 s4, s10
-; CHECK-LIS-NEXT: vldrw.u32 q2, [r0, #160]
-; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #176]
-; CHECK-LIS-NEXT: vmov.f32 s6, s12
-; CHECK-LIS-NEXT: vmov.f32 s7, s15
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #144]
-; CHECK-LIS-NEXT: vadd.i32 q1, q4, q1
-; CHECK-LIS-NEXT: vmov.f32 s18, s10
-; CHECK-LIS-NEXT: vmov.f32 s25, s8
-; CHECK-LIS-NEXT: vmov.f32 s26, s11
-; CHECK-LIS-NEXT: vmov.f32 s16, s12
-; CHECK-LIS-NEXT: vmov.f32 s17, s15
-; CHECK-LIS-NEXT: vmov.f32 s24, s13
-; CHECK-LIS-NEXT: vmov.f32 s27, s22
-; CHECK-LIS-NEXT: vmov.f32 s19, s21
-; CHECK-LIS-NEXT: vmov.f32 s8, s14
-; CHECK-LIS-NEXT: vadd.i32 q4, q4, q6
-; CHECK-LIS-NEXT: vmov.f32 s10, s20
-; CHECK-LIS-NEXT: vldrw.u32 q3, [r0, #112]
-; CHECK-LIS-NEXT: vmov.f32 s11, s23
-; CHECK-LIS-NEXT: vldrw.u32 q5, [r0, #128]
-; CHECK-LIS-NEXT: vadd.i32 q2, q4, q2
-; CHECK-LIS-NEXT: vldrw.u32 q4, [r0, #96]
-; CHECK-LIS-NEXT: vmov.f32 s25, s12
-; CHECK-LIS-NEXT: vstrw.32 q2, [r1, #48]
-; CHECK-LIS-NEXT: vmov.f32 s26, s15
-; CHECK-LIS-NEXT: vstrw.32 q0, [r1, #16]
-; CHECK-LIS-NEXT: vmov.f32 s30, s14
-; CHECK-LIS-NEXT: vstrw.32 q1, [r1]
-; CHECK-LIS-NEXT: vmov.f32 s24, s17
-; CHECK-LIS-NEXT: vmov.f32 s27, s22
-; CHECK-LIS-NEXT: vmov.f32 s28, s16
-; CHECK-LIS-NEXT: vmov.f32 s29, s19
-; CHECK-LIS-NEXT: vmov.f32 s31, s21
-; CHECK-LIS-NEXT: vadd.i32 q6, q7, q6
-; CHECK-LIS-NEXT: vmov.f32 s12, s18
-; CHECK-LIS-NEXT: vmov.f32 s14, s20
-; CHECK-LIS-NEXT: vmov.f32 s15, s23
-; CHECK-LIS-NEXT: vadd.i32 q3, q6, q3
-; CHECK-LIS-NEXT: vstrw.32 q3, [r1, #32]
-; CHECK-LIS-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-LIS-NEXT: bx lr
+; CHECK-LABEL: vld3_v16i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vldrw.u32 q0, [r0, #64]
+; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
+; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
+; CHECK-NEXT: vldrw.u32 q6, [r0, #176]
+; CHECK-NEXT: vmov.f32 s10, s2
+; CHECK-NEXT: vmov.f32 s13, s0
+; CHECK-NEXT: vmov.f32 s14, s3
+; CHECK-NEXT: vmov.f32 s8, s4
+; CHECK-NEXT: vmov.f32 s9, s7
+; CHECK-NEXT: vmov.f32 s12, s5
+; CHECK-NEXT: vmov.f32 s15, s18
+; CHECK-NEXT: vmov.f32 s11, s17
+; CHECK-NEXT: vadd.i32 q2, q2, q3
+; CHECK-NEXT: vmov.f32 s0, s6
+; CHECK-NEXT: vmov.f32 s2, s16
+; CHECK-NEXT: vldrw.u32 q1, [r0, #16]
+; CHECK-NEXT: vmov.f32 s3, s19
+; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
+; CHECK-NEXT: vadd.i32 q0, q2, q0
+; CHECK-NEXT: vldrw.u32 q2, [r0]
+; CHECK-NEXT: vmov.f32 s17, s4
+; CHECK-NEXT: vmov.f32 s18, s7
+; CHECK-NEXT: vmov.f32 s22, s6
+; CHECK-NEXT: vmov.f32 s16, s9
+; CHECK-NEXT: vmov.f32 s19, s14
+; CHECK-NEXT: vmov.f32 s20, s8
+; CHECK-NEXT: vmov.f32 s21, s11
+; CHECK-NEXT: vmov.f32 s23, s13
+; CHECK-NEXT: vmov.f32 s4, s10
+; CHECK-NEXT: vldrw.u32 q2, [r0, #160]
+; CHECK-NEXT: vmov.f32 s6, s12
+; CHECK-NEXT: vadd.i32 q4, q5, q4
+; CHECK-NEXT: vmov.f32 s7, s15
+; CHECK-NEXT: vldrw.u32 q3, [r0, #144]
+; CHECK-NEXT: vadd.i32 q1, q4, q1
+; CHECK-NEXT: vmov.f32 s18, s10
+; CHECK-NEXT: vmov.f32 s21, s8
+; CHECK-NEXT: vmov.f32 s22, s11
+; CHECK-NEXT: vmov.f32 s16, s12
+; CHECK-NEXT: vmov.f32 s17, s15
+; CHECK-NEXT: vmov.f32 s20, s13
+; CHECK-NEXT: vmov.f32 s23, s26
+; CHECK-NEXT: vmov.f32 s19, s25
+; CHECK-NEXT: vadd.i32 q4, q4, q5
+; CHECK-NEXT: vmov.f32 s8, s14
+; CHECK-NEXT: vmov.f32 s10, s24
+; CHECK-NEXT: vldrw.u32 q3, [r0, #112]
+; CHECK-NEXT: vmov.f32 s11, s27
+; CHECK-NEXT: vldrw.u32 q5, [r0, #128]
+; CHECK-NEXT: vadd.i32 q2, q4, q2
+; CHECK-NEXT: vldrw.u32 q4, [r0, #96]
+; CHECK-NEXT: vmov.f32 s25, s12
+; CHECK-NEXT: vstrw.32 q2, [r1, #48]
+; CHECK-NEXT: vmov.f32 s26, s15
+; CHECK-NEXT: vstrw.32 q0, [r1, #16]
+; CHECK-NEXT: vmov.f32 s30, s14
+; CHECK-NEXT: vstrw.32 q1, [r1]
+; CHECK-NEXT: vmov.f32 s24, s17
+; CHECK-NEXT: vmov.f32 s27, s22
+; CHECK-NEXT: vmov.f32 s28, s16
+; CHECK-NEXT: vmov.f32 s29, s19
+; CHECK-NEXT: vmov.f32 s31, s21
+; CHECK-NEXT: vadd.i32 q6, q7, q6
+; CHECK-NEXT: vmov.f32 s12, s18
+; CHECK-NEXT: vmov.f32 s14, s20
+; CHECK-NEXT: vmov.f32 s15, s23
+; CHECK-NEXT: vadd.i32 q3, q6, q3
+; CHECK-NEXT: vstrw.32 q3, [r1, #32]
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
entry:
%l1 = load <48 x i32>, ptr %src, align 4
diff --git a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
index 3bc0aba..93e2889 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
@@ -7,19 +7,22 @@
define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) {
; CHECK-LABEL: udiv_by_minus_one:
; CHECK: # %bb.0:
-; CHECK-NEXT: and %s0, %s0, (56)0
-; CHECK-NEXT: lea %s4, 16843010
-; CHECK-NEXT: muls.l %s0, %s0, %s4
-; CHECK-NEXT: srl %s0, %s0, 32
+; CHECK-NEXT: and %s4, %s0, (56)0
; CHECK-NEXT: and %s1, %s1, (56)0
-; CHECK-NEXT: muls.l %s1, %s1, %s4
-; CHECK-NEXT: srl %s1, %s1, 32
; CHECK-NEXT: and %s2, %s2, (56)0
-; CHECK-NEXT: muls.l %s2, %s2, %s4
-; CHECK-NEXT: srl %s2, %s2, 32
; CHECK-NEXT: and %s3, %s3, (56)0
-; CHECK-NEXT: muls.l %s3, %s3, %s4
-; CHECK-NEXT: srl %s3, %s3, 32
+; CHECK-NEXT: or %s0, 0, (0)1
+; CHECK-NEXT: cmpu.w %s5, %s3, (56)0
+; CHECK-NEXT: or %s3, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s3, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s5, %s2, (56)0
+; CHECK-NEXT: or %s2, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s2, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s5, %s1, (56)0
+; CHECK-NEXT: or %s1, 0, (0)1
+; CHECK-NEXT: cmov.w.eq %s1, (63)0, %s5
+; CHECK-NEXT: cmpu.w %s4, %s4, (56)0
+; CHECK-NEXT: cmov.w.eq %s0, (63)0, %s4
; CHECK-NEXT: b.l.t (, %s10)
%r = udiv <4 x i8> %x, <i8 255, i8 255, i8 255, i8 255>
ret <4 x i8> %r
@@ -28,27 +31,18 @@ define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) {
define <4 x i8> @urem_by_minus_one(<4 x i8> %x) {
; CHECK-LABEL: urem_by_minus_one:
; CHECK: # %bb.0:
-; CHECK-NEXT: and %s0, %s0, (56)0
-; CHECK-NEXT: and %s1, %s1, (56)0
-; CHECK-NEXT: and %s2, %s2, (56)0
-; CHECK-NEXT: and %s3, %s3, (56)0
-; CHECK-NEXT: lea %s4, 16843010
-; CHECK-NEXT: muls.l %s5, %s3, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s3, %s3, %s5
-; CHECK-NEXT: muls.l %s5, %s2, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s2, %s2, %s5
-; CHECK-NEXT: muls.l %s5, %s1, %s4
-; CHECK-NEXT: srl %s5, %s5, 32
-; CHECK-NEXT: muls.w.sx %s5, %s5, (56)0
-; CHECK-NEXT: subs.w.sx %s1, %s1, %s5
-; CHECK-NEXT: muls.l %s4, %s0, %s4
-; CHECK-NEXT: srl %s4, %s4, 32
-; CHECK-NEXT: muls.w.sx %s4, %s4, (56)0
-; CHECK-NEXT: subs.w.sx %s0, %s0, %s4
+; CHECK-NEXT: and %s4, %s0, (56)0
+; CHECK-NEXT: and %s5, %s1, (56)0
+; CHECK-NEXT: and %s6, %s2, (56)0
+; CHECK-NEXT: and %s7, %s3, (56)0
+; CHECK-NEXT: cmpu.w %s7, %s7, (56)0
+; CHECK-NEXT: cmov.w.eq %s3, (0)1, %s7
+; CHECK-NEXT: cmpu.w %s6, %s6, (56)0
+; CHECK-NEXT: cmov.w.eq %s2, (0)1, %s6
+; CHECK-NEXT: cmpu.w %s5, %s5, (56)0
+; CHECK-NEXT: cmov.w.eq %s1, (0)1, %s5
+; CHECK-NEXT: cmpu.w %s4, %s4, (56)0
+; CHECK-NEXT: cmov.w.eq %s0, (0)1, %s4
; CHECK-NEXT: b.l.t (, %s10)
%r = urem <4 x i8> %x, <i8 255, i8 255, i8 255, i8 255>
ret <4 x i8> %r
diff --git a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
index 6ef7219..9cf7aab 100644
--- a/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll
@@ -56,14 +56,9 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-LABEL: PR90954:
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbp
-; CHECK-NEXT: movq %rsp, %rbp
-; CHECK-NEXT: pushq %r15
; CHECK-NEXT: pushq %r14
-; CHECK-NEXT: pushq %r13
-; CHECK-NEXT: pushq %r12
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: andq $-1024, %rsp # imm = 0xFC00
-; CHECK-NEXT: subq $5120, %rsp # imm = 0x1400
+; CHECK-NEXT: subq $2912, %rsp # imm = 0xB60
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp)
@@ -79,29 +74,26 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-NEXT: movw $64, %cx
; CHECK-NEXT: movw $16, %di
; CHECK-NEXT: movb $1, %r8b
-; CHECK-NEXT: movl $64, %r9d
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r10
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %r11
-; CHECK-NEXT: xorl %ebx, %ebx
-; CHECK-NEXT: xorl %r14d, %r14d
+; CHECK-NEXT: xorl %r9d, %r9d
+; CHECK-NEXT: xorl %r10d, %r10d
; CHECK-NEXT: jmp .LBB1_1
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_5: # in Loop: Header=BB1_1 Depth=1
-; CHECK-NEXT: incq %r14
-; CHECK-NEXT: addl %edx, %ebx
+; CHECK-NEXT: incq %r10
+; CHECK-NEXT: addl %edx, %r9d
; CHECK-NEXT: .LBB1_1: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB1_2 Depth 2
-; CHECK-NEXT: movslq %ebx, %r15
-; CHECK-NEXT: leaq (%rsi,%r15,4), %r15
-; CHECK-NEXT: xorl %r12d, %r12d
-; CHECK-NEXT: xorl %r13d, %r13d
+; CHECK-NEXT: movslq %r9d, %r11
+; CHECK-NEXT: leaq (%rsi,%r11,4), %r11
+; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: xorl %r14d, %r14d
; CHECK-NEXT: jmp .LBB1_2
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB1_4: # in Loop: Header=BB1_2 Depth=2
-; CHECK-NEXT: tilestored %tmm1, (%r15,%rax)
-; CHECK-NEXT: incq %r13
-; CHECK-NEXT: addq $64, %r15
-; CHECK-NEXT: decq %r12
+; CHECK-NEXT: tilestored %tmm1, (%r11,%rax)
+; CHECK-NEXT: incq %r14
+; CHECK-NEXT: addq $64, %r11
+; CHECK-NEXT: decq %rbx
; CHECK-NEXT: je .LBB1_5
; CHECK-NEXT: .LBB1_2: # Parent Loop BB1_1 Depth=1
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
@@ -110,46 +102,12 @@ define void @PR90954(ptr %0, ptr %1, i32 %2) nounwind {
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne .LBB1_4
; CHECK-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=2
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: tileloadd (%r10,%r9), %tmm1
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: vmovaps %zmm0, {{[0-9]+}}(%rsp)
-; CHECK-NEXT: tileloadd (%r11,%r9), %tmm2
+; CHECK-NEXT: tilezero %tmm1
+; CHECK-NEXT: tilezero %tmm2
; CHECK-NEXT: tdpbf16ps %tmm2, %tmm1, %tmm0
-; CHECK-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-NEXT: movabsq $64, %rax
-; CHECK-NEXT: tilestored %tmm0, 3072(%rsp,%rax) # 1024-byte Folded Spill
-; CHECK-NEXT: tileloadd 3072(%rsp,%rax), %tmm1 # 1024-byte Folded Reload
-; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; CHECK-NEXT: movabsq $64, %rbp
+; CHECK-NEXT: tilestored %tmm0, 896(%rsp,%rbp) # 1024-byte Folded Spill
+; CHECK-NEXT: tileloadd 896(%rsp,%rbp), %tmm1 # 1024-byte Folded Reload
; CHECK-NEXT: jmp .LBB1_4
%4 = shl i32 %2, 4
%5 = icmp eq i64 0, 0
diff --git a/llvm/test/CodeGen/X86/combine-pack.ll b/llvm/test/CodeGen/X86/combine-pack.ll
new file mode 100644
index 0000000..2f5454d
--- /dev/null
+++ b/llvm/test/CodeGen/X86/combine-pack.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
+
+define <8 x i16> @combine_packss_v4i32_signsplat(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_packss_v4i32_signsplat:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_packss_v4i32_signsplat:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cmp = icmp sgt <4 x i32> %a0, %a1
+ %ext = sext <4 x i1> %cmp to <4 x i32>
+ %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1))
+ %signsplat = ashr <8 x i16> %pack, splat (i16 15)
+ ret <8 x i16> %signsplat
+}
+
+define <8 x i16> @combine_packss_v4i32_freeze_signsplat(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_packss_v4i32_freeze_signsplat:
+; SSE: # %bb.0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_packss_v4i32_freeze_signsplat:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cmp = icmp sgt <4 x i32> %a0, %a1
+ %ext = sext <4 x i1> %cmp to <4 x i32>
+ %pack = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %ext, <4 x i32> splat (i32 -1))
+ %freeze = freeze <8 x i16> %pack
+ %signsplat = ashr <8 x i16> %freeze, splat (i16 15)
+ ret <8 x i16> %signsplat
+}
diff --git a/llvm/test/CodeGen/X86/fshl.ll b/llvm/test/CodeGen/X86/fshl.ll
index ec1b8a3..f998128 100644
--- a/llvm/test/CodeGen/X86/fshl.ll
+++ b/llvm/test/CodeGen/X86/fshl.ll
@@ -335,84 +335,83 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind {
; X86-SLOW-NEXT: pushl %esi
; X86-SLOW-NEXT: andl $-16, %esp
; X86-SLOW-NEXT: subl $32, %esp
-; X86-SLOW-NEXT: movl 24(%ebp), %esi
+; X86-SLOW-NEXT: movl 24(%ebp), %edi
; X86-SLOW-NEXT: movl 28(%ebp), %eax
; X86-SLOW-NEXT: movl 48(%ebp), %edx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: testb $64, %cl
-; X86-SLOW-NEXT: movl 52(%ebp), %edi
+; X86-SLOW-NEXT: movl 52(%ebp), %ebx
; X86-SLOW-NEXT: jne .LBB6_1
; X86-SLOW-NEXT: # %bb.2:
; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %esi, %edx
-; X86-SLOW-NEXT: movl 32(%ebp), %esi
-; X86-SLOW-NEXT: movl %edi, %ecx
-; X86-SLOW-NEXT: movl %eax, %edi
+; X86-SLOW-NEXT: movl %edi, %edx
+; X86-SLOW-NEXT: movl 32(%ebp), %edi
+; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %eax, %ebx
; X86-SLOW-NEXT: movl 36(%ebp), %eax
; X86-SLOW-NEXT: jmp .LBB6_3
; X86-SLOW-NEXT: .LBB6_1:
; X86-SLOW-NEXT: movl 40(%ebp), %ecx
; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl 44(%ebp), %ecx
+; X86-SLOW-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: .LBB6_3:
-; X86-SLOW-NEXT: movl 56(%ebp), %ebx
-; X86-SLOW-NEXT: testb $32, %bl
+; X86-SLOW-NEXT: movl 56(%ebp), %ecx
+; X86-SLOW-NEXT: testb $32, %cl
; X86-SLOW-NEXT: jne .LBB6_4
; X86-SLOW-NEXT: # %bb.5:
-; X86-SLOW-NEXT: movl %ecx, %ebx
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %edi
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: jmp .LBB6_6
; X86-SLOW-NEXT: .LBB6_4:
-; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %ecx, %edx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %ebx
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: .LBB6_6:
-; X86-SLOW-NEXT: movl %edx, %esi
+; X86-SLOW-NEXT: movl %edi, %eax
+; X86-SLOW-NEXT: shll %cl, %eax
+; X86-SLOW-NEXT: shrl %esi
+; X86-SLOW-NEXT: movl %ecx, %edx
+; X86-SLOW-NEXT: notb %dl
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shrl %cl, %esi
+; X86-SLOW-NEXT: orl %eax, %esi
+; X86-SLOW-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %ebx, %eax
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: shll %cl, %esi
-; X86-SLOW-NEXT: movl %ebx, %edi
+; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: shll %cl, %eax
; X86-SLOW-NEXT: shrl %edi
-; X86-SLOW-NEXT: movl %ecx, %ebx
-; X86-SLOW-NEXT: notb %bl
-; X86-SLOW-NEXT: movl %ebx, %ecx
-; X86-SLOW-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shrl %cl, %edi
-; X86-SLOW-NEXT: orl %esi, %edi
+; X86-SLOW-NEXT: orl %eax, %edi
; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-SLOW-NEXT: movl %esi, %eax
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-SLOW-NEXT: shll %cl, %eax
-; X86-SLOW-NEXT: shrl %edx
-; X86-SLOW-NEXT: movl %ebx, %ecx
-; X86-SLOW-NEXT: shrl %cl, %edx
-; X86-SLOW-NEXT: orl %eax, %edx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-SLOW-NEXT: movl %ebx, %eax
+; X86-SLOW-NEXT: shrl %ebx
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shrl %cl, %ebx
+; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-SLOW-NEXT: shll %cl, %eax
; X86-SLOW-NEXT: shrl %esi
-; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shrl %cl, %esi
; X86-SLOW-NEXT: orl %eax, %esi
-; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-SLOW-NEXT: shll %cl, %eax
-; X86-SLOW-NEXT: shrl %ebx
-; X86-SLOW-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
-; X86-SLOW-NEXT: shrl %cl, %ebx
-; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 8(%ebp), %eax
-; X86-SLOW-NEXT: movl %ebx, 12(%eax)
-; X86-SLOW-NEXT: movl %esi, 8(%eax)
-; X86-SLOW-NEXT: movl %edx, 4(%eax)
-; X86-SLOW-NEXT: movl %edi, (%eax)
+; X86-SLOW-NEXT: movl %esi, 12(%eax)
+; X86-SLOW-NEXT: movl %ebx, 8(%eax)
+; X86-SLOW-NEXT: movl %edi, 4(%eax)
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, (%eax)
; X86-SLOW-NEXT: leal -12(%ebp), %esp
; X86-SLOW-NEXT: popl %esi
; X86-SLOW-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/fshr.ll b/llvm/test/CodeGen/X86/fshr.ll
index 544ab7f..c307833 100644
--- a/llvm/test/CodeGen/X86/fshr.ll
+++ b/llvm/test/CodeGen/X86/fshr.ll
@@ -322,79 +322,79 @@ define i128 @var_shift_i128(i128 %x, i128 %y, i128 %z) nounwind {
; X86-SLOW-NEXT: subl $16, %esp
; X86-SLOW-NEXT: movl 24(%ebp), %edx
; X86-SLOW-NEXT: movl 28(%ebp), %esi
-; X86-SLOW-NEXT: movl 48(%ebp), %ebx
+; X86-SLOW-NEXT: movl 48(%ebp), %edi
; X86-SLOW-NEXT: movl 56(%ebp), %eax
; X86-SLOW-NEXT: testb $64, %al
-; X86-SLOW-NEXT: movl 52(%ebp), %edi
+; X86-SLOW-NEXT: movl 52(%ebp), %eax
; X86-SLOW-NEXT: je .LBB6_1
; X86-SLOW-NEXT: # %bb.2:
-; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT: movl %edx, %ebx
+; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-SLOW-NEXT: movl %edx, %edi
; X86-SLOW-NEXT: movl 32(%ebp), %edx
-; X86-SLOW-NEXT: movl %edi, %eax
-; X86-SLOW-NEXT: movl %esi, %edi
+; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %esi, %eax
; X86-SLOW-NEXT: movl 36(%ebp), %esi
; X86-SLOW-NEXT: jmp .LBB6_3
; X86-SLOW-NEXT: .LBB6_1:
-; X86-SLOW-NEXT: movl 40(%ebp), %eax
-; X86-SLOW-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-SLOW-NEXT: movl 44(%ebp), %eax
+; X86-SLOW-NEXT: movl 40(%ebp), %ecx
+; X86-SLOW-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-SLOW-NEXT: movl 44(%ebp), %ecx
; X86-SLOW-NEXT: .LBB6_3:
-; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: testb $32, %cl
+; X86-SLOW-NEXT: movl 56(%ebp), %ebx
+; X86-SLOW-NEXT: testb $32, %bl
; X86-SLOW-NEXT: je .LBB6_4
; X86-SLOW-NEXT: # %bb.5:
-; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl %ecx, %ebx
; X86-SLOW-NEXT: jmp .LBB6_6
; X86-SLOW-NEXT: .LBB6_4:
; X86-SLOW-NEXT: movl %edx, %esi
+; X86-SLOW-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-SLOW-NEXT: movl %eax, %ebx
-; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, %edi
+; X86-SLOW-NEXT: movl (%esp), %ebx # 4-byte Reload
; X86-SLOW-NEXT: .LBB6_6:
-; X86-SLOW-NEXT: shrl %cl, %eax
-; X86-SLOW-NEXT: movl %eax, %edx
-; X86-SLOW-NEXT: movl %ecx, %eax
-; X86-SLOW-NEXT: notb %al
-; X86-SLOW-NEXT: movl %ebx, %edi
-; X86-SLOW-NEXT: addl %ebx, %ebx
-; X86-SLOW-NEXT: movl %eax, %ecx
-; X86-SLOW-NEXT: shll %cl, %ebx
-; X86-SLOW-NEXT: orl %edx, %ebx
-; X86-SLOW-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
-; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: shrl %cl, %edi
-; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-SLOW-NEXT: leal (%ebx,%ebx), %edx
-; X86-SLOW-NEXT: movl %eax, %ecx
-; X86-SLOW-NEXT: shll %cl, %edx
-; X86-SLOW-NEXT: orl %edi, %edx
+; X86-SLOW-NEXT: shrl %cl, %ebx
+; X86-SLOW-NEXT: movl %ecx, %edx
+; X86-SLOW-NEXT: notb %dl
+; X86-SLOW-NEXT: movl %edi, %eax
+; X86-SLOW-NEXT: addl %edi, %edi
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shll %cl, %edi
+; X86-SLOW-NEXT: orl %ebx, %edi
+; X86-SLOW-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-SLOW-NEXT: shrl %cl, %ebx
-; X86-SLOW-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: shrl %cl, %eax
; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-SLOW-NEXT: leal (%edi,%edi), %ebx
-; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shll %cl, %ebx
-; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-SLOW-NEXT: orl %eax, %ebx
; X86-SLOW-NEXT: movl 56(%ebp), %ecx
; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
; X86-SLOW-NEXT: shrl %cl, %edi
+; X86-SLOW-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-SLOW-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-SLOW-NEXT: leal (%eax,%eax), %edi
+; X86-SLOW-NEXT: movl %edx, %ecx
+; X86-SLOW-NEXT: shll %cl, %edi
+; X86-SLOW-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-SLOW-NEXT: movl 56(%ebp), %ecx
+; X86-SLOW-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SLOW-NEXT: shrl %cl, %eax
; X86-SLOW-NEXT: addl %esi, %esi
-; X86-SLOW-NEXT: movl %eax, %ecx
+; X86-SLOW-NEXT: movl %edx, %ecx
; X86-SLOW-NEXT: shll %cl, %esi
-; X86-SLOW-NEXT: orl %edi, %esi
-; X86-SLOW-NEXT: movl 8(%ebp), %ecx
-; X86-SLOW-NEXT: movl %esi, 12(%ecx)
-; X86-SLOW-NEXT: movl %ebx, 8(%ecx)
-; X86-SLOW-NEXT: movl %edx, 4(%ecx)
-; X86-SLOW-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-SLOW-NEXT: movl %eax, (%ecx)
-; X86-SLOW-NEXT: movl %ecx, %eax
+; X86-SLOW-NEXT: orl %eax, %esi
+; X86-SLOW-NEXT: movl 8(%ebp), %eax
+; X86-SLOW-NEXT: movl %esi, 12(%eax)
+; X86-SLOW-NEXT: movl %edi, 8(%eax)
+; X86-SLOW-NEXT: movl %ebx, 4(%eax)
+; X86-SLOW-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-SLOW-NEXT: movl %ecx, (%eax)
; X86-SLOW-NEXT: leal -12(%ebp), %esp
; X86-SLOW-NEXT: popl %esi
; X86-SLOW-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/sbb.ll b/llvm/test/CodeGen/X86/sbb.ll
index 78d609d..f5a3468 100644
--- a/llvm/test/CodeGen/X86/sbb.ll
+++ b/llvm/test/CodeGen/X86/sbb.ll
@@ -365,3 +365,32 @@ define i32 @uge_sext_add(i32 %0, i32 %1, i32 %2) {
%6 = add nsw i32 %5, %0
ret i32 %6
}
+
+define i32 @sub_sub_ugt(i32 %a, i32 %b) {
+; CHECK-LABEL: sub_sub_ugt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: sbbl %esi, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ugt i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %sub = sub i32 %a, %b
+ %res = sub i32 %sub, %conv
+ ret i32 %res
+}
+
+define i32 @sub_sub_ult(i32 %a, i32 %b) {
+; CHECK-LABEL: sub_sub_ult:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: sbbl %esi, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ult i32 %b, %a
+ %conv = zext i1 %cmp to i32
+ %sub = sub i32 %a, %b
+ %res = sub i32 %sub, %conv
+ ret i32 %res
+}
+
diff --git a/llvm/test/CodeGen/X86/shift-i128.ll b/llvm/test/CodeGen/X86/shift-i128.ll
index 7462c77..049ee47 100644
--- a/llvm/test/CodeGen/X86/shift-i128.ll
+++ b/llvm/test/CodeGen/X86/shift-i128.ll
@@ -613,8 +613,7 @@ define void @test_shl_v2i128(<2 x i128> %x, <2 x i128> %a, ptr nocapture %r) nou
; i686-NEXT: shldl %cl, %esi, %ebx
; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; i686-NEXT: movl %edi, %esi
-; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; i686-NEXT: movl %eax, %ecx
+; i686-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; i686-NEXT: shll %cl, %esi
; i686-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; i686-NEXT: negl %edx